| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86-54.5k) | int64 (0-371) | string (lengths 87-49.2k) | int64 (0-349) | int64 (0-1) |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A_ ( _snake_case ):
"""simple docstring"""
__UpperCamelCase = """Speech2TextFeatureExtractor"""
__UpperCamelCase = """Speech2TextTokenizer"""
def __init__( self :List[str] , lowercase_ :Optional[Any] , lowercase_ :Tuple ) -> List[str]:
super().__init__(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase = self.feature_extractor
UpperCAmelCase = False
def __call__( self :Union[str, Any] , *lowercase_ :str , **lowercase_ :Tuple ) -> Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
UpperCAmelCase = kwargs.pop('raw_speech' )
else:
UpperCAmelCase = kwargs.pop('audio' , _lowerCamelCase )
UpperCAmelCase = kwargs.pop('sampling_rate' , _lowerCamelCase )
UpperCAmelCase = kwargs.pop('text' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
UpperCAmelCase = args[0]
UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
UpperCAmelCase = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
if text is not None:
UpperCAmelCase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase = encodings['''input_ids''']
return inputs
def UpperCAmelCase__ ( self :str , *lowercase_ :str , **lowercase_ :str ) -> List[Any]:
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase__ ( self :Optional[Any] , *lowercase_ :Any , **lowercase_ :List[Any] ) -> int:
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def UpperCAmelCase__ ( self :str ) -> Optional[Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
UpperCAmelCase = True
UpperCAmelCase = self.tokenizer
yield
UpperCAmelCase = self.feature_extractor
UpperCAmelCase = False
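# Minimal usage sketch for the processor above (not part of the original file).
# The checkpoint name and the synthetic 16 kHz waveform are illustrative assumptions.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.zeros(16000, dtype=np.float32)  # stand-in for one second of 16 kHz audio

# audio-only call goes through the feature extractor
inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")

# passing `text` as well attaches the tokenized transcript as inputs["labels"]
batch = processor(audio=waveform, sampling_rate=16000, text="a transcript", return_tensors="pt")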
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Best-effort decryption of a Caesar cipher: try every shift, score each candidate
    plaintext with a chi-squared statistic against English letter frequencies, and
    return (most_likely_shift, its_chi_squared_value, decoded_text).
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of occurrences based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of occurrences based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
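# Usage example (illustrative): the ciphertext below is English shifted by 7
# within a-z; for text of this length the chi-squared score should pick the
# correct shift.
shift, chi_value, decoded = decrypt_caesar_with_chi_squared(
    "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
)
print(shift, decoded)  # 7 why is the caesar cipher so popular? it is too easy to crack!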
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowercase = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
__lowercase = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : List[Any] = ["""input_ids""", """attention_mask"""]
UpperCAmelCase : int = CodeGenTokenizer
def __init__( self : List[Any] , __UpperCAmelCase : str=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : int="<|endoftext|>" , __UpperCAmelCase : Dict="<|endoftext|>" , __UpperCAmelCase : str="<|endoftext|>" , __UpperCAmelCase : List[str]=False , **__UpperCAmelCase : Union[str, Any] , ):
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
if kwargs.pop("add_bos_token" , __UpperCAmelCase):
a : Union[str, Any] = kwargs.pop("name_or_path" , "")
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
f'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly.")
a : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , __UpperCAmelCase) != add_prefix_space:
a : str = getattr(__UpperCAmelCase , pre_tok_state.pop("type"))
a : Dict = add_prefix_space
a : str = pre_tok_class(**__UpperCAmelCase)
a : Optional[int] = add_prefix_space
def __snake_case ( self : Optional[Any] , *__UpperCAmelCase : Any , **__UpperCAmelCase : Any):
a : List[str] = kwargs.get("is_split_into_words" , __UpperCAmelCase)
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Dict , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : List[str]):
a : List[str] = kwargs.get("is_split_into_words" , __UpperCAmelCase)
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None):
a : Any = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase)
return tuple(__UpperCAmelCase)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[List[str]] = None , **__UpperCAmelCase : Union[str, Any] , ):
a : Optional[int] = super().decode(
token_ids=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , **__UpperCAmelCase , )
if truncate_before_pattern is not None and len(__UpperCAmelCase) > 0:
a : Any = self.truncate(__UpperCAmelCase , __UpperCAmelCase)
return decoded_text
def __snake_case ( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any):
def find_re(__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any]):
a : int = pattern.search(__UpperCAmelCase , __UpperCAmelCase)
return m.start() if m else -1
a : List[Any] = [re.compile(__UpperCAmelCase , re.MULTILINE) for pattern in truncate_before_pattern]
a : List[Any] = list(re.finditer("^print" , __UpperCAmelCase , re.MULTILINE))
if len(__UpperCAmelCase) > 1:
a : int = completion[: prints[1].start()]
a : Optional[int] = list(re.finditer("^def" , __UpperCAmelCase , re.MULTILINE))
if len(__UpperCAmelCase) > 1:
a : Optional[Any] = completion[: defs[1].start()]
a : int = 0
a : List[Any] = [
pos for pos in [find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) for terminal in terminals] if pos != -1
]
if len(__UpperCAmelCase) > 0:
return completion[: min(__UpperCAmelCase)]
else:
return completion
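# Sketch of the truncation behaviour above (checkpoint name is the one the
# file references; the sample completion is illustrative): decoding is cut at
# the second top-level `def`/`print` and at the first match of any pattern in
# `truncate_before_pattern`.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
completion = 'def hello():\n    print("hi")\n\n# next function\ndef bye():'
ids = tokenizer(completion, return_tensors="pt").input_ids[0]
text = tokenizer.decode(ids, truncate_before_pattern=[r"^#"], skip_special_tokens=True)
# `text` now keeps only the first function: the second `def` and the comment are cut off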
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = """longformer"""
def __init__( self : List[Any] , __UpperCAmelCase : Union[List[int], int] = 512 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 30522 , __UpperCAmelCase : int = 768 , __UpperCAmelCase : int = 12 , __UpperCAmelCase : int = 12 , __UpperCAmelCase : int = 3072 , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : float = 1e-12 , __UpperCAmelCase : bool = False , **__UpperCAmelCase : int , ):
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a : int = attention_window
a : List[str] = sep_token_id
a : List[Any] = bos_token_id
a : int = eos_token_id
a : Union[str, Any] = vocab_size
a : Dict = hidden_size
a : int = num_hidden_layers
a : int = num_attention_heads
a : Optional[int] = hidden_act
a : Union[str, Any] = intermediate_size
a : Tuple = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : str = max_position_embeddings
a : Dict = type_vocab_size
a : Optional[Any] = initializer_range
a : List[Any] = layer_norm_eps
a : List[str] = onnx_export
class _A ( _a ):
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : "PretrainedConfig" , __UpperCAmelCase : str = "default" , __UpperCAmelCase : "List[PatchingSpec]" = None):
super().__init__(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = True
@property
def __snake_case ( self : int):
if self.task == "multiple-choice":
a : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
a : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
])
@property
def __snake_case ( self : Union[str, Any]):
a : str = super().outputs
if self.task == "default":
a : Tuple = {0: "batch"}
return outputs
@property
def __snake_case ( self : Optional[int]):
return 1e-4
@property
def __snake_case ( self : str):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def __snake_case ( self : List[str] , __UpperCAmelCase : "PreTrainedTokenizerBase" , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
a : str = super().generate_dummy_inputs(
preprocessor=__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
a : Union[str, Any] = torch.zeros_like(inputs["input_ids"])
# make every second token global
a : Dict = 1
return inputs
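# Illustrative construction of the config above (values are assumptions, not
# project defaults beyond what the signature states):
config = LongformerConfig(attention_window=[256] * 12, max_position_embeddings=4098)
onnx_config = LongformerOnnxConfig(config)
print(list(onnx_config.inputs))       # ['input_ids', 'attention_mask', 'global_attention_mask']
print(onnx_config.default_onnx_opset) # at least 14, needed for the tril operator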
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bitstring one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bitstrings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit input in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (uses the module-level `p4_table`)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """Scale a pixel-space bounding box into the 0-1000 range used by LayoutLM-style models."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    """Apply Tesseract OCR on a document image and return recognized words plus normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Document image processor: optional resizing, optional Tesseract OCR, and RGB-to-BGR conversion."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BILINEAR,
        data_format=None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
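# Minimal usage sketch (not in the original file). The class name above follows
# the Detectron2 BGR comment, which points at LayoutLMv2; apply_ocr=True would
# additionally require the pytesseract binary to be installed.
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
image = np.zeros((256, 256, 3), dtype=np.uint8)  # synthetic RGB image
encoding = image_processor(image, return_tensors="np")
print(encoding["pixel_values"].shape)  # expected (1, 3, 224, 224)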
"""Convert a fairseq XGLM checkpoint into the HuggingFace Transformers format."""
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no counterpart in the HF model."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build an lm_head linear layer that shares its weights with the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
"""VQGAN-CLIP image editing: optimize a latent offset so the decoded image matches text prompts."""
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn

from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """
    Wraps the HuggingFace CLIP processing so gradients can flow through the image
    preprocessing step: text goes through the regular tokenizer, while images stay
    torch tensors and are resized/normalized with differentiable torchvision transforms.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """Instantiate a VQGAN-CLIP editor; pass `vqgan`/`clip` to reuse preloaded models."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the intermediate frames saved during generation into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add the transform vector to the base latent and decode the result to an image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image that matches the positive prompts (and avoids the negative ones)."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
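# Usage sketch for the class above (not in the original file). The module
# depends on local helpers (loaders.py, img_processing.py, utils.py) and a
# VQGAN checkpoint, so the paths below are placeholders, not real defaults.
vqgan_clip = VQGAN_CLIP(
    iterations=50,
    lr=0.05,
    vqgan_config="path/to/vqgan_config.yaml",
    vqgan_checkpoint="path/to/vqgan.ckpt",
    log=False,
)
# prompts can be "a | b" strings, (text, weight) pairs, or "text:weight"
vqgan_clip.generate(
    pos_prompts="a portrait with blue eyes | smiling:0.5",
    neg_prompts="blurry:0.3",
    show_intermediate=False,
    save_final=True,
)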
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
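# Both init files above use the transformers lazy-import pattern: the package
# module is replaced in sys.modules by a _LazyModule, so importing it is cheap
# and the torch-backed classes are loaded only on first attribute access.
# Rough illustration (assumes an installed transformers with this model):
import transformers.models.gpt_neox_japanese as gpt_neox_japanese

# this attribute access is what actually imports the real submodule
config_cls = gpt_neox_japanese.GPTNeoXJapaneseConfig
print(config_cls.__name__)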
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # vocab doesn't exist, so conversion round-trips are not applicable
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
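# Hedged sketch (not part of this file): how the generated keys could be used
# for textbook ElGamal. Since e_2 is the modular inverse of e_1**d (mod p),
# encrypting as c1 = e_1^k, c2 = m * e_2^k lets the holder of d recover
# m = c2 * c1^d mod p, because e_2^k * e_1^(d*k) == 1 (mod p).
def encrypt_int(m, public_key):
    _, e_1, e_2, p = public_key
    k = random.randrange(2, p - 1)  # fresh ephemeral key per message
    return pow(e_1, k, p), (m * pow(e_2, k, p)) % p


def decrypt_int(c1, c2, private_key, p):
    _, d = private_key
    return (c2 * pow(c1, d, p)) % p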
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b with Jacobi iteration for a strictly diagonally dominant A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry fails to dominate the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
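
    # Illustrative run (matrix values invented for demonstration, not from the
    # original file): a strictly diagonally dominant 3x3 system.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 50))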
| 107
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
                         eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
                         pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
                         trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
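
# Illustrative usage sketch (requires network access to the Hugging Face Hub;
# the checkpoint name is the one referenced in the maps above):
#
#     tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     tok(" Hello world")["input_ids"]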
| 189
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val: str) -> int:
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
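
# For example, strtobool("YES") -> 1 and strtobool("off") -> 0; any other
# string raises ValueError.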
def is_tensor(x) -> bool:
    """Test whether `x` is a torch/TF/jax tensor, an fx proxy or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x) -> bool:
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x) -> bool:
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x) -> bool:
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x) -> bool:
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x) -> bool:
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x) -> bool:
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x) -> bool:
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert TF/torch/jax tensors and numpy arrays to plain Python objects, recursively."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert TF/torch/jax tensors and Python containers to numpy arrays, recursively."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
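
# Example of the recursive behaviour (illustrative): given a torch tensor,
# to_py_obj({"ids": torch.tensor([[1, 2]])}) returns {"ids": [[1, 2]]}, and
# to_numpy([1, 2, 3]) returns np.array([1, 2, 3]).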
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
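
# Illustrative sketch of how ModelOutput is meant to be subclassed; the class
# name and field below are hypothetical, not from this file:
#
#     @dataclass
#     class SampleOutput(ModelOutput):
#         logits: Any = None
#
# An instance then behaves both like a dataclass (out.logits) and like an
# ordered mapping (out["logits"], out.to_tuple()).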
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a model class can return loss (i.e. `return_loss` defaults to True)."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """List the label argument names expected by a model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
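
# For example (illustrative): flatten_dict({"a": {"b": 1}, "c": 2}) returns
# {"a.b": 1, "c": 2}, with nested keys joined by the delimiter.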
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic transpose for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic reshape for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic squeeze for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic expand_dims for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic element count for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix every entry of an auto_map with `repo_id--` unless it is already namespaced."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infer whether a model class comes from TensorFlow, PyTorch or Flax by inspecting its MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
| 189
| 1
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str,
                 finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
__UpperCAmelCase : Any = self._tf_checkpoint
__UpperCAmelCase : Dict = ''''''
else:
__UpperCAmelCase : str = self._tf_checkpoint
__UpperCAmelCase : Tuple = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
a_ , self._config , self._pytorch_dump_output , a_ )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]"
            )
| 226
|
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for the two input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
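    # Half-adder truth table for reference: sum = a XOR b (classical bit 0) and
    # carry = a AND b (classical bit 1), so half_adder(1, 1) should put (close
    # to) all 1000 shots on the '10' outcome: carry 1, sum 0.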
| 226
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
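
# Illustrative behaviour: with `utils src` as arguments and edits in
# src/foo.py and README.md since the fork point, this prints `src/foo.py`
# (no trailing newline, as the Makefile expects).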
| 350
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
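
# Note on the transfer trick above: both models are traced with the same dummy
# input and parametrized operations are copied pairwise in execution order, so
# it implicitly assumes src and dest run equivalent layers in lockstep.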
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            config,
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 131
| 0
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
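
# For example (illustrative): clean_doc_toc([{"local": "overview", "title": "Overview"},
# {"local": "b", "title": "B"}, {"local": "a", "title": "A"}]) keeps the
# overview entry first and returns the remaining entries sorted by title (A, B).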
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 274
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
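
# check_correctness runs the candidate program in a separate process so that a
# hang or crash in generated code cannot take down the evaluator; "timed out"
# is reported whenever the child outlives the `timeout` budget.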
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files). This is not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 274
| 1
|
'''simple docstring'''
def __magic_name__( lowerCamelCase, lowerCamelCase = " "):
__lowerCAmelCase = []
__lowerCAmelCase = 0
for index, char in enumerate(lowerCamelCase):
if char == separator:
split_words.append(string[last_index:index])
__lowerCAmelCase = index + 1
elif index + 1 == len(lowerCamelCase):
split_words.append(string[last_index : index + 1])
return split_words
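
# For example (illustrative): split("apple#banana#cherry", "#") returns
# ['apple', 'banana', 'cherry'], and split("Hello there") splits on spaces.
# Note the quirk that a trailing separator drops the final empty segment.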
if __name__ == "__main__":
from doctest import testmod
testmod()
| 9
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 9
| 1
|
"""simple docstring"""
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert between energy units via their joule equivalents in the table above."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
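
# For example: energy_conversion("joule", "kilojoule", 1_000) == 1.0 and
# energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0.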
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
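

# Usage sketch (added for illustration; assumes the classes above behave like
# the public InstructBlip* configs in transformers, and that any causal-LM or
# seq2seq text config from CONFIG_MAPPING can serve as the language backbone):
if __name__ == "__main__":
    vision_config = InstructBlipVisionConfig()
    qformer_config = InstructBlipQFormerConfig()
    text_config = CONFIG_MAPPING["opt"]()  # default text backbone used by the composite config
    config = InstructBlipConfig.from_vision_qformer_text_configs(
        vision_config, qformer_config, text_config, num_query_tokens=32
    )
    print(config.to_dict()["model_type"])  # instructblip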
| 145
| 0
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 124
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
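

# Hypothetical usage sketch (added): PipelineTool instances are callable, and a
# call is routed through encode/forward/decode above. This downloads the
# bart-large-mnli checkpoint on first use.
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This movie was absolutely wonderful.", ["positive", "negative"]))  # likely "positive"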
| 124
| 1
|
lowerCamelCase : dict[str, float] ={
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
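
    # Worked examples (added): the table maps every unit to joules, so a
    # conversion is value * joules_per_from_unit / joules_per_to_unit.
    print(energy_conversion("joule", "kilojoule", 1000))       # 1.0
    print(energy_conversion("kilowatthour", "joule", 1))       # 3600000.0
    print(energy_conversion("calorie_nutr", "wattsecond", 1))  # 4186.8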
| 189
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
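

# A minimal, self-contained sketch (added) of the lazy-import pattern used
# above: attribute access imports the providing submodule only on first use.
# This is an illustrative reimplementation, not the actual _LazyModule class.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr in self._symbol_to_module:
            module = importlib.import_module(f".{self._symbol_to_module[attr]}", self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")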
| 189
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
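

# Hypothetical usage sketch (added): with apply_ocr=True (the image processor's
# default) the processor runs OCR itself, so only an image is required. The OCR
# step needs pytesseract installed; the image path below is a placeholder.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")
    print(list(encoding.keys()))  # input_ids, attention_mask, bbox, pixel_values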
| 171
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps"
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
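

# Minimal sketch (added) of wiring a custom callback into a real Trainer, the
# same way the tests above pass `callbacks=[MyTestTrainerCallback]`.
# TrainerCallback is already imported at the top of this file.
class LogEveryTenStepsCallback(TrainerCallback):
    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step % 10 == 0:
            print(f"reached step {state.global_step}")

# trainer = Trainer(model, training_args, train_dataset=ds, callbacks=[LogEveryTenStepsCallback()])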
| 85
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 131
| 0
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of func near the starting guess a using Newton's method:
    x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e (the root of log(x) - 1 = 0, where log is the natural log)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
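
    # Usage sketch (added): square roots come out of the same routine by solving
    # x**2 - c = 0, e.g. sqrt(5) from the starting guess 2.
    print(f"sqrt(5) ~= {newton_raphson('x**2 - 5', 2)}")  # ~2.23606797749979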
| 189
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_UpperCAmelCase : str = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Dict = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
_UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self , A_ , A_ , A_ , A_ = 512 , A_ = 512 , A_ = 100 , A_ = 4.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , ):
'''simple docstring'''
_UpperCAmelCase : str = self._execution_device
_UpperCAmelCase : Tuple = guidance_scale > 1.0
if isinstance(A_ , A_ ):
_UpperCAmelCase : Union[str, Any] = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Dict = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Any = torch.cat(A_ , dim=0 )
_UpperCAmelCase : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Tuple = hint.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
_UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
_UpperCAmelCase : Dict = self.scheduler.timesteps
_UpperCAmelCase : Union[str, Any] = self.movq.config.latent_channels
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Optional[Any] = {"image_embeds": image_embeds, "hint": hint}
_UpperCAmelCase : Optional[int] = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = variance_pred.chunk(2 )
_UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
_UpperCAmelCase : Optional[Any] = self.movq.decode(A_ , force_not_quantize=A_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Union[str, Any] = image * 0.5 + 0.5
_UpperCAmelCase : Dict = image.clamp(0 , 1 )
_UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
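

# The classifier-free guidance update used in the denoising loop above, shown
# in isolation with dummy tensors (added sketch; guidance_scale > 1 pushes the
# prediction toward the text-conditional branch):
if __name__ == "__main__":
    dummy_uncond = torch.randn(1, 4, 96, 96)
    dummy_text = torch.randn(1, 4, 96, 96)
    guidance_scale = 4.0
    guided = dummy_uncond + guidance_scale * (dummy_text - dummy_uncond)
    print(guided.shape)  # torch.Size([1, 4, 96, 96])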
| 189
| 1
|
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
    from doctest import testmod

    testmod()
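
    # Quick checks (added) for the splitter above:
    print(split("apple#banana#cherry#orange", "#"))  # ['apple', 'banana', 'cherry', 'orange']
    print(split("Hello there"))  # ['Hello', 'there']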
| 9
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 9
| 1
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self):
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 365
|
def is_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    import doctest

    doctest.testmod()
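
    # Quick checks (added; `is_unique` is the name given to the helper above):
    print(is_unique("abcdef"))  # True
    print(is_unique("hello"))   # False, 'l' repeats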
| 28
| 0
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
    print(f"{solution() = }")
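
    # Sanity checks (added): the primes below 10 are 2, 3, 5 and 7, which sum to 17.
    assert solution(10) == 17
    assert is_prime(97) and not is_prime(1)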
| 124
|
def mean_absolute_deviation(nums: list) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
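
    # Worked check (added): for [1, 2, 3, 4] the mean is 2.5, so the mean
    # absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
    assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0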
| 124
| 1
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
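# Usage sketch (the checkpoint path below is hypothetical, not part of the original
# script): run the converter on a Megatron-LM checkpoint and the outputs land next
# to it, ready for transformers' GPT2LMHeadModel.from_pretrained().
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip
#
# This writes config.json, the tokenizer files and pytorch_model.bin into the
# directory containing the input checkpoint.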
| 343
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
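# A minimal deterministic check of the sort above (the comparison count varies
# with the random pivot choice, so only the sorted order is asserted):
_demo = [3, 1, 2]
_in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 2, 3]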
| 343
| 1
|
"""simple docstring"""
import os
def a__ ( lowerCAmelCase = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(lowerCAmelCase ) , lowerCAmelCase ) ) as input_file:
UpperCAmelCase__ : int = [
[int(lowerCAmelCase ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(matrix[0] )
UpperCAmelCase__ : int = [[-1 for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )]
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = matrix[i][0]
for j in range(1 , lowerCAmelCase ):
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase ):
UpperCAmelCase__ : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCAmelCase__ : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
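# Worked example (a hypothetical matrix, not the project's input.txt): for
# [[1, 9, 1], [5, 1, 1]] the column-by-column DP (relax left, then down, then up)
# yields minimal path sums [[1, 10, 8], [5, 6, 7]], so the answer is
# min(8, 7) = 7, realised by the path 5 -> 1 -> 1.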
| 171
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""OwlViTFeatureExtractor"""]
_A = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **kwargs):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 309
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
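# A small seeded sketch of the helpers above (output depends on the global random
# state, so treat the concrete strings as illustrative only):
# random.seed(42)
# child_1, child_2 = crossover("abcd", "wxyz")   # one-point crossover of the parents
# mutated = mutate(child_1, genes_list)          # at most one gene flips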
| 309
| 1
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
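# Minimal usage sketch of the FileLock API exercised above (datasets vendors the
# `filelock` package, so acquire(timeout=...) and Timeout behave the same way):
# lock = FileLock("resource.lock")
# with lock.acquire(timeout=1):
#     ...  # critical section; a concurrent holder makes acquire raise Timeout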
| 189
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 189
| 1
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def a__ ( snake_case__ , snake_case__ , snake_case__=None ) -> Union[str, Any]:
lowerCamelCase = {}
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase = model.mobilenet_va
else:
lowerCamelCase = model
lowerCamelCase = """MobilenetV1/Conv2d_0/"""
lowerCamelCase = backbone.conv_stem.convolution.weight
lowerCamelCase = backbone.conv_stem.normalization.bias
lowerCamelCase = backbone.conv_stem.normalization.weight
lowerCamelCase = backbone.conv_stem.normalization.running_mean
lowerCamelCase = backbone.conv_stem.normalization.running_var
for i in range(13 ):
lowerCamelCase = i + 1
lowerCamelCase = i * 2
lowerCamelCase = backbone.layer[pt_index]
lowerCamelCase = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
lowerCamelCase = pointer.convolution.weight
lowerCamelCase = pointer.normalization.bias
lowerCamelCase = pointer.normalization.weight
lowerCamelCase = pointer.normalization.running_mean
lowerCamelCase = pointer.normalization.running_var
lowerCamelCase = backbone.layer[pt_index + 1]
lowerCamelCase = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
lowerCamelCase = pointer.convolution.weight
lowerCamelCase = pointer.normalization.bias
lowerCamelCase = pointer.normalization.weight
lowerCamelCase = pointer.normalization.running_mean
lowerCamelCase = pointer.normalization.running_var
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
lowerCamelCase = model.classifier.weight
lowerCamelCase = model.classifier.bias
return tf_to_pt_map
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
lowerCamelCase = tf.train.list_variables(snake_case__ )
lowerCamelCase = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
lowerCamelCase = tf.train.load_variable(snake_case__ , snake_case__ )
lowerCamelCase = array
# Build TF to PyTorch weights loading map
lowerCamelCase = _build_tf_to_pytorch_map(snake_case__ , snake_case__ , snake_case__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
lowerCamelCase = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
lowerCamelCase = np.transpose(snake_case__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
lowerCamelCase = array.squeeze().transpose()
else:
lowerCamelCase = np.transpose(snake_case__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
lowerCamelCase = torch.from_numpy(snake_case__ )
tf_weights.pop(snake_case__ , snake_case__ )
tf_weights.pop(name + """/RMSProp""" , snake_case__ )
tf_weights.pop(name + """/RMSProp_1""" , snake_case__ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , snake_case__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
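# Worked example of the "SAME" padding rule above (hypothetical sizes): for a
# 224x224 input with kernel 3 and stride 2, 224 % 2 == 0, so the total padding
# per axis is max(3 - 2, 0) = 1, split as (left, right, top, bottom) = (0, 1, 0, 1),
# matching TensorFlow's asymmetric SAME padding.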
class __magic_name__ ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a = 1 , _a = 1 , _a = False , _a = True , _a = True , ):
"""simple docstring"""
super().__init__()
lowerCamelCase = config
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.' )
lowerCamelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowerCamelCase = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , stride=_a , padding=_a , groups=_a , bias=_a , padding_mode="""zeros""" , )
if use_normalization:
lowerCamelCase = nn.BatchNormad(
num_features=_a , eps=config.layer_norm_eps , momentum=0.9_997 , affine=_a , track_running_stats=_a , )
else:
lowerCamelCase = None
if use_activation:
if isinstance(_a , _a ):
lowerCamelCase = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _a ):
lowerCamelCase = ACTaFN[config.hidden_act]
else:
lowerCamelCase = config.hidden_act
else:
lowerCamelCase = None
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if self.config.tf_padding:
lowerCamelCase = apply_tf_padding(_a , self.convolution )
lowerCamelCase = self.convolution(_a )
if self.normalization is not None:
lowerCamelCase = self.normalization(_a )
if self.activation is not None:
lowerCamelCase = self.activation(_a )
return features
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = MobileNetVaConfig
__UpperCamelCase = load_tf_weights_in_mobilenet_va
__UpperCamelCase = "mobilenet_v1"
__UpperCamelCase = "pixel_values"
__UpperCamelCase = False
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if isinstance(_a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCAmelCase : Union[str, Any] = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : Tuple = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , UpperCAmelCase__ , )
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , _a , _a = True ):
"""simple docstring"""
super().__init__(_a )
lowerCamelCase = config
lowerCamelCase = 32
lowerCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowerCamelCase = MobileNetVaConvLayer(
_a , in_channels=config.num_channels , out_channels=_a , kernel_size=3 , stride=2 , )
lowerCamelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCamelCase = nn.ModuleList()
for i in range(13 ):
lowerCamelCase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=3 , stride=strides[i] , groups=_a , ) )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=1 , ) )
lowerCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCAmelCase ( self , _a = None , _a = None , _a = None , ):
"""simple docstring"""
lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
lowerCamelCase = self.conv_stem(_a )
lowerCamelCase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowerCamelCase = layer_module(_a )
if output_hidden_states:
lowerCamelCase = all_hidden_states + (hidden_states,)
lowerCamelCase = hidden_states
if self.pooler is not None:
lowerCamelCase = torch.flatten(self.pooler(_a ) , start_dim=1 )
else:
lowerCamelCase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=_a , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , _a ):
"""simple docstring"""
super().__init__(_a )
lowerCamelCase = config.num_labels
lowerCamelCase = MobileNetVaModel(_a )
lowerCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=_a )
lowerCamelCase = nn.Linear(_a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCAmelCase ( self , _a = None , _a = None , _a = None , _a = None , ):
"""simple docstring"""
lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase = self.mobilenet_va(_a , output_hidden_states=_a , return_dict=_a )
lowerCamelCase = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase = self.classifier(self.dropout(_a ) )
lowerCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase = """single_label_classification"""
else:
lowerCamelCase = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCamelCase = MSELoss()
if self.num_labels == 1:
lowerCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase = CrossEntropyLoss()
lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase = BCEWithLogitsLoss()
lowerCamelCase = loss_fct(_a , _a )
if not return_dict:
lowerCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , )
| 354
|
"""simple docstring"""
from __future__ import annotations
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a ):
"""simple docstring"""
lowerCamelCase = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(_a ) != 0:
lowerCamelCase = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(_a ) != cols:
raise error
for value in row:
if not isinstance(_a , (int, float) ):
raise error
lowerCamelCase = rows
else:
lowerCamelCase = []
def _lowerCAmelCase ( self ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return len(self.rows )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return len(self.rows[0] )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.order[0] == self.order[1]
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return bool(self.determinant() )
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(_a ).determinant()
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(_a , _a )
return -1 * self.get_minor(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return Matrix(
[
[self.get_minor(_a , _a ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
"""simple docstring"""
return str(self.rows )
def __str__( self ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(_a ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(_a , _a ):
raise type_error
for value in row:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(_a )
else:
lowerCamelCase = self.rows[0:position] + [row] + self.rows[position:]
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(_a , _a ):
raise type_error
for value in column:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
lowerCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
lowerCamelCase = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , _a ):
"""simple docstring"""
if not isinstance(_a , _a ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , _a ):
"""simple docstring"""
return not self == other
def __neg__( self ):
"""simple docstring"""
return self * -1
def __add__( self , _a ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , _a ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , _a ):
"""simple docstring"""
if isinstance(_a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(_a , _a ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(_a , _a ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self , _a ):
"""simple docstring"""
if not isinstance(_a , _a ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
lowerCamelCase = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def _lowerCAmelCase ( cls , _a , _a ):
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(_a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
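# Usage sketch of the matrix API above (method names as in the upstream class;
# note that scalar multiplication truncates entries to int):
# m = Matrix([[1, 2], [3, 4]])
# m.determinant()  # -2
# m.inverse()      # adjugate() * (1 / determinant)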
| 168
| 0
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12_544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 14
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 28
| 0
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
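# Sanity check of the recurrence above: it matches
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = f(1) = 1, so e.g.
# solution(4) == 8 (the all-grey row plus 7 tilings that use a 2-, 3- or 4-tile).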
| 369
|
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value=None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root, value):
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root):
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root, args):
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
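# Minimal usage sketch of the helpers above:
# root = None
# for x in (5, 3, 8):
#     root = insert(root, x)
# inorder(root)  # prints: 3,5,8,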
| 311
| 0
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
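# A minimal shape check for the permutation above (hypothetical sizes):
# qkv = torch.arange(2 * 3 * 4 * 5, dtype=torch.float32).view(2 * 3 * 4, 5)
# out = fix_query_key_value_ordering(qkv, 2.0, num_splits=3, num_heads=2, hidden_size=4)
# assert out.shape == qkv.shape  # layout becomes splits-first; the shape does not change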
def convert_megatron_checkpoint(args, input_state_dict, config):
'''simple docstring'''
# The converted output model.
UpperCamelCase = {}
# old versions did not store training args
UpperCamelCase = input_state_dict.get("""args""" , UpperCamelCase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
UpperCamelCase = ds_args.padded_vocab_size
UpperCamelCase = ds_args.max_position_embeddings
UpperCamelCase = ds_args.hidden_size
UpperCamelCase = ds_args.num_layers
UpperCamelCase = ds_args.num_attention_heads
UpperCamelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
UpperCamelCase = config.n_head
# The hidden_size per head.
UpperCamelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
UpperCamelCase = input_state_dict["""checkpoint_version"""]
else:
UpperCamelCase = 0.0
# The model.
UpperCamelCase = input_state_dict["""model"""]
# The language model.
UpperCamelCase = model["""language_model"""]
# The embeddings.
UpperCamelCase = lm["""embedding"""]
# The word embeddings.
UpperCamelCase = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
UpperCamelCase = word_embeddings[: config.vocab_size, :]
UpperCamelCase = word_embeddings
# The position embeddings.
UpperCamelCase = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
UpperCamelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
UpperCamelCase = pos_embeddings
# The transformer.
UpperCamelCase = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
UpperCamelCase = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
UpperCamelCase = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
UpperCamelCase = layer_re.match(UpperCamelCase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
UpperCamelCase = int(m.group(1 ) )
# The name of the operation.
UpperCamelCase = m.group(2 )
# Is it a weight or a bias?
UpperCamelCase = m.group(3 )
# The name of the layer.
UpperCamelCase = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
UpperCamelCase = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
UpperCamelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
UpperCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
UpperCamelCase = torch.tensor(-1E4 , dtype=torch.floataa )
UpperCamelCase = masked_bias
UpperCamelCase = fix_query_key_value_ordering(UpperCamelCase_ , UpperCamelCase_ , 3 , UpperCamelCase_ , UpperCamelCase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
UpperCamelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
UpperCamelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
UpperCamelCase = fix_query_key_value_ordering(UpperCamelCase_ , UpperCamelCase_ , 3 , UpperCamelCase_ , UpperCamelCase_ )
# Store. No change of shape.
UpperCamelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
UpperCamelCase = megatron_to_transformers[op_name]
UpperCamelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
UpperCamelCase = megatron_to_transformers[op_name]
UpperCamelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
UpperCamelCase = transformer["""final_layernorm.weight"""]
UpperCamelCase = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
UpperCamelCase = word_embeddings
# It should be done!
return output_state_dict
def main():
'''simple docstring'''
# Create the argument parser.
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=UpperCamelCase_ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=UpperCamelCase_ , help="""An optional config json file describing the pre-trained model.""" , )
UpperCamelCase = parser.parse_args()
# Extract the basename.
UpperCamelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
UpperCamelCase = torch.load(UpperCamelCase_ , map_location="""cpu""" )
else:
UpperCamelCase = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
UpperCamelCase = input_state_dict.get("""args""" , UpperCamelCase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
UpperCamelCase = """gelu_fast"""
elif ds_args.openai_gelu:
UpperCamelCase = """gelu_new"""
else:
UpperCamelCase = """gelu"""
else:
# in the very early days this used to be "gelu_new"
UpperCamelCase = """gelu_new"""
# Spell out all parameters in case the defaults change.
UpperCamelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=UpperCamelCase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type="""cls_index""" , summary_use_proj=UpperCamelCase_ , summary_activation=UpperCamelCase_ , summary_proj_to_labels=UpperCamelCase_ , summary_first_dropout=0.1 , scale_attn_weights=UpperCamelCase_ , use_cache=UpperCamelCase_ , bos_token_id=50256 , eos_token_id=50256 , )
else:
UpperCamelCase = GPTaConfig.from_json_file(args.config_file )
UpperCamelCase = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
UpperCamelCase = convert_megatron_checkpoint(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(UpperCamelCase_ , UpperCamelCase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
UpperCamelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
UpperCamelCase = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
UpperCamelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
UpperCamelCase = """gpt2"""
UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
UpperCamelCase = type(UpperCamelCase_ ).__name__
UpperCamelCase = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(UpperCamelCase_ )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(UpperCamelCase_ )
# Store the state_dict to file.
UpperCamelCase = os.path.join(UpperCamelCase_ , """pytorch_model.bin""" )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(UpperCamelCase_ , UpperCamelCase_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowerCAmelCase = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
__lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
__lowerCAmelCase = BitConfig
__lowerCAmelCase = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
| 343
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer
# GPT2Tokenizer/GPT2TokenizerFast are aliased to match the renamed identifiers used below.
from transformers import GPT2Tokenizer as GPTaTokenizer, GPT2TokenizerFast as GPTaTokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level,
        # and get both GPT2 and Roberta to pass at the same time.
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_serialize_deprecated_from_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
    def test_slow_tokenizer_encoding(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=_lowerCamelCase )
UpperCAmelCase__ : Dict = """bos"""
UpperCAmelCase__ : int = tokenizer.get_vocab()["""bos"""]
UpperCAmelCase__ : List[str] = """A photo of a cat"""
UpperCAmelCase__ : List[Any] = tokenizer.encode(
_lowerCamelCase , )
# We changed the bos token
self.assertEqual(_lowerCamelCase , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""./tok""" )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
UpperCAmelCase__ : int = tokenizer.encode(
_lowerCamelCase , )
self.assertEqual(_lowerCamelCase , [31957, 250, 1345, 9, 10, 4758] )
| 166
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer( Seq2SeqTrainer ):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 166
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Calla if False else Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    # Number of integration steps needed to reach x_end from x0.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
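# A minimal usage sketch (hypothetical values): integrating y' = y from x = 0 with
# y(0) = 1 up to x = 1 should approach e ≈ 2.718 as step_size shrinks, e.g.:
#   explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]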
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 1
|
from ...processing_utils import ProcessorMixin
class TvltProcessor( ProcessorMixin ):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
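    # Note: the merged list preserves first-seen order and de-duplicates keys; the exact
    # names (e.g. "pixel_values", "audio_values") depend on the underlying processors.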
| 306
|
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
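# A minimal usage sketch: hamming_distance("karolin", "kathrin") returns 3,
# since the strings differ in exactly three positions.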
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 1
|
"""simple docstring"""
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def XOR(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: the outer bits select the row, the inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = XOR(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = XOR(left, temp)
    return temp + right
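# Data flow of one round: the right half is expanded and permuted, XOR-ed with the
# round key, passed through the two S-boxes, permuted with P4, and XOR-ed into the
# left half, while the right half itself is carried through unchanged.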
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 197
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental Sieve of Eratosthenes: lazily yields primes one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: move its recorded factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: schedule its square as the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
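# Only odd n are checked because for even n the remainder of
# (p_n - 1)^n + (p_n + 1)^n modulo p_n^2 collapses to 2 (Project Euler 123).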
if __name__ == "__main__":
print(solution())
| 168
| 0
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Utility function. Use to log results of the currently evaluated dataset."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize the target text: lowercase, strip ignored characters, collapse whitespace."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
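# For example, normalize_text("Hello,\nWorld!") yields "hello world", assuming the
# punctuation falls inside chars_to_ignore_regex above.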
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
__A : Optional[Any] = parser.parse_args()
main(args)
| 27
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
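    # Resulting layout: <s> A </s> for a single sequence and <s> A </s> B </s> for a pair,
    # with token_type_ids 0 over the first segment and 1 over the second.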
| 27
| 1
|
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
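# Note: power iteration converges to the dominant eigenpair at a rate governed by
# |lambda_2 / lambda_1| and assumes the largest-magnitude eigenvalue is unique.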
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 55
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
| 0
|
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
__snake_case = namedtuple('''covid_data''', '''cases deaths recovered''')
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 219
|
'''simple docstring'''
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
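# A minimal usage sketch: get_device_map(24, [0, 1, 2, 3]) assigns six consecutive
# layers per device, e.g. device 0 receives layers [0, 1, 2, 3, 4, 5].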
| 219
| 1
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
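# A minimal usage sketch (hypothetical argument names): calling
#   deprecate("my_arg", "0.30.0", "Use `new_arg` instead.", take_from=kwargs)
# pops `my_arg` from kwargs if present, emits a FutureWarning, and returns the value.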
| 166
|
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
| 166
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """The prior transformer from unCLIP, used to predict CLIP image embeddings from CLIP text embeddings."""
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""")
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""")
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""")
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True, )
                for d in range(num_layers)
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""")
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Returns a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Sets the attention processor to use: a single processor, or a dict mapping weight names to processors."""
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor)} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"""{name}.processor"""))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True, ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(additional_embeds, dim=1, )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        """Un-normalizes prior latents back to the CLIP image-embedding statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
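
# Editor's shape-level sketch of driving the prior (untrained weights, default
# config, so inner_dim = 32 * 64 = 2048 and the CLIP dimension is 768):
prior = PriorTransformer()
x = torch.randn(2, 768)                 # noised CLIP image embedding
text_emb = torch.randn(2, 768)          # pooled CLIP text embedding
text_states = torch.randn(2, 77, 768)   # per-token CLIP text states
out = prior(x, timestep=10, proj_embedding=text_emb, encoder_hidden_states=text_states)
print(out.predicted_image_embedding.shape)  # torch.Size([2, 768])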
| 348
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
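
# Editor's sketch: the same checkpoint can be exercised outside the test harness
# (tokenization elided; the hard-coded ids below are a truncated version of the
# ones used in the tests, and `from_pt=True` requires torch to be installed):
# from transformers import FlaxRobertaPreLayerNormModel
# import numpy as np
#
# model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
# hidden = model(np.array([[0, 31414, 232, 2]], dtype="i4")).last_hidden_state
# print(hidden.shape)  # (1, 4, hidden_size)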
| 348
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
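
# Editor's sketch: the lazy-module pattern means the heavy torch/tf/flax
# submodules above are only imported on first attribute access, e.g.:
# from transformers.models.wav2vec2 import Wav2Vec2Config  # resolved lazily
# config = Wav2Vec2Config(hidden_size=768)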
| 306
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Per-process tensor: process 0 holds [1, ..., n], process 1 holds [n + 1, ..., 2n], etc."""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # the main process pads with one more element so the others must be padded up to it
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now, runs only on 2 processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now, runs only on 2 processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
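
# Editor's sketch: this script expects one process per device. Instead of the
# `accelerate launch` CLI, the same entry point can be spawned from Python
# (assuming a host with at least 2 GPUs):
# from accelerate import notebook_launcher
#
# notebook_launcher(main, args=(), num_processes=2)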
| 306
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
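
# Editor's sketch: outside the test suite, the same cascaded setup follows the
# documented DeepFloyd IF pattern (weights are gated behind a license on the Hub;
# this reuses the torch/diffusers imports at the top of this file):
# stage_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
# stage_1.enable_model_cpu_offload()
# prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
# image = stage_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images
# stage_2 = IFSuperResolutionPipeline.from_pretrained(
#     "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
# )
# stage_2.enable_model_cpu_offload()
# upscaled = stage_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images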
| 230
|
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Inserts a word into the trie, marking the end of the word with the END sentinel."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Returns every stored completion of `prefix` (empty list if the prefix is absent)."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
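
# Editor's note: every completion carries a trailing space because `_elements`
# maps the END sentinel to " ". With the word tuple above (dict insertion order),
# the run prints:
# ('depart ', 'detergent ', 'deer ', 'deal ')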
| 230
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
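
# Editor's end-to-end sketch with the released checkpoint (name taken from the
# CLIPSeg model card; any PIL image works as input):
# from PIL import Image
# from transformers import CLIPSegProcessor
#
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# image = Image.new("RGB", (352, 352))
# batch = processor(text=["a cat", "a dog"], images=[image] * 2, padding=True, return_tensors="pt")
# print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']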
| 27
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        sas_model.load_state_dict(save_dict['model'])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source='wiki40b', method='dense', n_results=10):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='english_wiki40b_snippets_100w', n_results=n_results, )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device='cuda:0', )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'

sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
                    ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
                )
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
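
# Editor's sketch: the dense path above reduces to max-inner-product search over
# fixed-width embeddings. The same pattern, self-contained with random vectors
# and reusing the `faiss`/`np` imports at the top of this script (128 matches the
# width of the memmapped passage representations):
# passage_reps = np.random.rand(1000, 128).astype('float32')  # stand-in for embedded passages
# mips_index = faiss.IndexFlatIP(128)
# mips_index.add(passage_reps)
# q_rep = np.random.rand(1, 128).astype('float32')  # stand-in for an embedded question
# D, I = mips_index.search(q_rep, 10)  # top-10 inner-product scores and passage row ids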
| 27
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."})
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        }, )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = "longest"
lowerCamelCase_ = None
lowerCamelCase_ = None
def __call__( self , lowercase ):
"""simple docstring"""
A_ : str = self.feature_extractor.pad(
lowercase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
A_ : Union[str, Any] = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
A_ : int = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
A_ : Any = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
A_ : Any = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
A_ : int = 1
A_ : Optional[int] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
A_ : Dict = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowercase , min_masks=2 , )
return batch
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , *lowercase , lowercase=1 , lowercase=0 , lowercase=1.0 , **lowercase ):
"""simple docstring"""
super().__init__(*lowercase , **lowercase )
A_ : List[str] = 0
A_ : str = max_gumbel_temp
A_ : Optional[Any] = min_gumbel_temp
A_ : Any = gumbel_temp_decay
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
model.train()
A_ : str = self._prepare_inputs(lowercase )
if self.use_amp:
with autocast():
A_ : Optional[int] = self.compute_loss(lowercase , lowercase )
else:
A_ : int = self.compute_loss(lowercase , lowercase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
A_ : Optional[Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
A_ : List[Any] = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
A_ : Any = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowercase ).backward()
elif self.use_apex:
with amp.scale_loss(lowercase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowercase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A_ , A_ , A_ : List[str] = parser.parse_args_into_dataclasses()
configure_logger(__lowercase ,__lowercase )
# Downloading and loading a dataset from the hub.
A_ : int = load_dataset(data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
A_ : Any = DatasetDict()
A_ : Dict = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' ,cache_dir=model_args.cache_dir ,)
A_ : Tuple = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' ,cache_dir=model_args.cache_dir ,)
else:
# make sure only "validation" and "train" keys remain"
A_ : Tuple = DatasetDict()
A_ : Dict = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split='validation' ,cache_dir=model_args.cache_dir ,)
A_ : Union[str, Any] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f'''{data_args.train_split_name}''' ,cache_dir=model_args.cache_dir ,)
# only normalized-inputs-training is supported
A_ : Dict = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,do_normalize=__lowercase )
def prepare_dataset(__lowercase : Optional[int] ):
# check that all files have the correct sampling rate
A_ , A_ : str = librosa.load(batch[data_args.speech_file_column] ,sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
A_ : List[Any] = datasets.map(
__lowercase ,num_proc=data_args.preprocessing_num_workers ,remove_columns=datasets['train'].column_names )
# filter audio files that are too long
A_ : Tuple = vectorized_datasets.filter(
lambda __lowercase : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__lowercase : Any ):
return feature_extractor(batch['speech'] ,sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
A_ : Optional[int] = vectorized_datasets.map(
__lowercase ,batched=__lowercase ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,remove_columns=vectorized_datasets['train'].column_names ,)
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
A_ : int = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,gradient_checkpointing=training_args.gradient_checkpointing ,)
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
A_ : int = WavaVecaForPreTraining(__lowercase )
A_ : Union[str, Any] = DataCollatorForWavaVecaPretraining(model=__lowercase ,feature_extractor=__lowercase )
A_ : Union[str, Any] = WavaVecaPreTrainer(
model=__lowercase ,data_collator=__lowercase ,args=__lowercase ,train_dataset=vectorized_datasets['train'] ,eval_dataset=vectorized_datasets['validation'] ,tokenizer=__lowercase ,max_gumbel_temp=model_args.max_gumbel_temperature ,min_gumbel_temp=model_args.min_gumbel_temperature ,gumbel_temp_decay=model_args.gumbel_temperature_decay ,)
trainer.train()
if __name__ == "__main__":
main()
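
# A minimal sketch (not part of the original script) of the Gumbel softmax temperature
# schedule implemented by Wav2Vec2PreTrainer above: the temperature decays geometrically
# with every update step and is clamped at the minimum temperature. The defaults mirror
# the ModelArguments fields.
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    return max(max_temp * decay**step, min_temp)

# e.g. gumbel_temperature(0) == 2.0, and the schedule bottoms out at 0.5 for very large step counts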
| 192
|
import math


def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
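
# Quick sanity check (an illustrative addition, not in the original file): for n = 10 the
# sum of squares is 385 and the square of the sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 2640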
| 192
| 1
|
from __future__ import annotations


def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
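
# A small usage sketch (not in the original module): with a principal of 10_000, a daily
# rate of 0.05% and 30 days between payments, simple interest is 10_000 * 0.0005 * 30 = 150.0,
# and compound interest at a 5% rate over 3 periods is 10_000 * (1.05**3 - 1) ≈ 1576.25.
if __name__ == "__main__":
    print(simple_interest(10_000, 0.0005, 30))  # 150.0
    print(compound_interest(10_000, 0.05, 3))   # ~1576.25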
| 219
|
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
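
# Hypothetical illustration (not part of the test file) of what `_convert_nargs_to_dict`
# is asserted to produce for the successful argument list above: flag/value pairs become
# typed entries, roughly {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
# "learning_rate": 5e-5, "max_steps": 50.5}, while the failing list mixes bare flags with
# values and is rejected with a ValueError.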
| 219
| 1
|
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Returns the same output as `tf.nn.softmax`, with a tiny shift of the logits to work
    around XLA-related numerical issues."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # A simplified functional layernorm, designed to duplicate the behavior of
    # PyTorch's nn.functional.layer_norm when this is needed to port models.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF.

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Invert an attention mask (e.g., switches 0. and 1.)."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Raises if any index in `tensor` is out of bounds for an embedding layer of size `embed_dim`."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group, chunking the payload
    when it exceeds the HDF5 object-header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group, re-assembling chunked payloads."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
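
# A small usage sketch (not part of the module) showing how `flatten` mirrors torch.flatten:
# flattening dims 1..2 of a (2, 3, 4, 5) tensor yields shape (2, 12, 5).
if __name__ == "__main__":
    x = tf.zeros((2, 3, 4, 5))
    print(shape_list(flatten(x, start_dim=1, end_dim=2)))  # [2, 12, 5]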
| 78
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` appears in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check that the arguments in `__init__` of `config_class` are used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
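
# A tiny illustration (not part of the script) of the multi-line `getattr` pattern the
# regex in `check_attribute_being_used` is meant to catch, where the attribute name ends
# up on a different line than `getattr(`:
#
#     value = getattr(
#         self.config, "hidden_size", None
#     )
#
# The plain substring checks miss this form, hence the `re.search` fallback.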
| 78
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Returns a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Sets the attention processor to use to compute attention."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and sets the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
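
# A small illustration (not part of the model) of the additive causal mask built in
# `__init__` above: entries strictly above the diagonal are -10000.0 so softmax assigns
# them near-zero probability, while the diagonal and lower triangle stay 0.
if __name__ == "__main__":
    mask = torch.full([4, 4], -10000.0)
    mask.triu_(1)
    print(mask[0])  # tensor([     0., -10000., -10000., -10000.])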
| 348
|
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
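
# Note (not part of the test): `optax.softmax_cross_entropy` expects one-hot targets, so
# the mean per-token loss is rescaled by the target length before being compared against
# EXPECTED_SCORE, turning it into a sequence-level log-likelihood-style score.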
| 348
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 158
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock function to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
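
# A brief usage sketch (not part of the module); "example.lock" is a hypothetical path.
# `FileLock` resolves to the platform-appropriate class above, acquire/release nest via
# the internal counter, and `Timeout` is raised if the lock cannot be taken in time:
#
#     lock = FileLock("example.lock", timeout=5)
#     with lock:
#         ...  # critical section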
| 158
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
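
# Note (not part of the file): with `_LazyModule`, submodules listed in `_import_structure`
# are only imported on attribute access, so e.g. `from transformers import MobileViTConfig`
# does not pull in the torch- or TF-specific modeling files.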
| 230
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
snake_case__ : Dict = prepare_numpy_arrays(__lowercase )
snake_case__ : Tuple = model(__lowercase ,noise=__lowercase )
snake_case__ : Dict = model(**__lowercase ,noise=__lowercase )
self.assert_outputs_same(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[Any] ,__lowercase :List[str] ):
# make masks reproducible
np.random.seed(2 )
snake_case__ : Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case__ : Any = tf.constant(__lowercase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case__ : Optional[Any] = tf_noise
super().check_pt_tf_models(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Dict ):
# make mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowercase )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__lowercase ,__lowercase ),)
if isinstance(__lowercase ,__lowercase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowercase ,'''_keras_serializable''' ,__lowercase )
}
snake_case__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case__ : Any = tf.convert_to_tensor(__lowercase )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case__ : List[Any] = main_layer_class(__lowercase )
snake_case__ : Union[str, Any] = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case__ : Optional[Any] = tf.keras.Model(__lowercase ,outputs=main_layer(__lowercase ) )
snake_case__ : List[str] = model(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : List[str] = os.path.join(__lowercase ,'''keras_model.h5''' )
model.save(__lowercase )
snake_case__ : List[str] = tf.keras.models.load_model(
__lowercase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowercase ,tf.keras.Model )
snake_case__ : Union[str, Any] = model(__lowercase )
self.assert_outputs_same(__lowercase ,__lowercase )
@slow
def __lowerCamelCase ( self :Any ):
# make mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(__lowercase )
snake_case__ : str = self._prepare_for_class(__lowercase ,__lowercase )
snake_case__ : List[Any] = model(__lowercase ,noise=__lowercase )
if model_class.__name__ == "TFViTMAEModel":
snake_case__ : List[Any] = outputs.last_hidden_state.numpy()
snake_case__ : List[Any] = 0
else:
snake_case__ : Any = outputs.logits.numpy()
snake_case__ : Tuple = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase ,saved_model=__lowercase )
snake_case__ : Optional[Any] = model_class.from_pretrained(__lowercase )
snake_case__ : Any = model(__lowercase ,noise=__lowercase )
if model_class.__name__ == "TFViTMAEModel":
snake_case__ : Dict = after_outputs['''last_hidden_state'''].numpy()
snake_case__ : List[Any] = 0
else:
snake_case__ : Any = after_outputs['''logits'''].numpy()
snake_case__ : Optional[Any] = 0
snake_case__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase ,1e-5 )
def __lowerCamelCase ( self :int ):
# make mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__lowercase )
snake_case__ : int = self._prepare_for_class(__lowercase ,__lowercase )
snake_case__ : List[Any] = model(__lowercase ,noise=__lowercase )
snake_case__ : int = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowercase )
snake_case__ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case__ : Optional[int] = model_class.from_config(model.config )
snake_case__ : Tuple = new_model(__lowercase ) # Build model
new_model.set_weights(model.get_weights() )
snake_case__ : List[Any] = new_model(__lowercase ,noise=__lowercase )
self.assert_outputs_same(__lowercase ,__lowercase )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __lowerCamelCase ( self :List[Any] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __lowerCamelCase ( self :Tuple ):
pass
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[Any] = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__lowercase )
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
snake_case__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self :List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self :Any ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
snake_case__ : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
snake_case__ : int = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Tuple = image_processor(images=__lowercase ,return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case__ : str = ViTMAEConfig()
snake_case__ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case__ : Dict = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case__ : List[str] = model(**__lowercase ,noise=__lowercase )
# verify the logits
snake_case__ : Optional[Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape ,__lowercase )
snake_case__ : Tuple = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,__lowercase ,atol=1e-4 )
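# Minimal inference sketch for the checkpoint exercised above (hedged: standard
# transformers usage, not part of the original test file):
#
#   from transformers import ViTImageProcessor, TFViTMAEForPreTraining
#   processor = ViTImageProcessor.from_pretrained('facebook/vit-mae-base')
#   model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base')
#   inputs = processor(images=prepare_img(), return_tensors='tf')
#   outputs = model(**inputs)  # logits: (batch, num_patches, patch_size**2 * num_channels)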
| 230
| 1
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCAmelCase , R'''\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ''' , )
class __magic_name__ (_lowerCAmelCase ):
def __a ( self , _a ) -> Optional[Any]:
if self.framework == "tf":
lowerCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def __a ( self , _a ) -> Any:
lowerCAmelCase_ = self.get_masked_index(_lowercase )
lowerCAmelCase_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def __a ( self , _a ) -> Optional[Any]:
if isinstance(_lowercase , _lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowercase )
def __a ( self , _a , _a=None , **_a ) -> Optional[int]:
if return_tensors is None:
lowerCAmelCase_ = self.framework
lowerCAmelCase_ = self.tokenizer(_lowercase , return_tensors=_lowercase )
self.ensure_exactly_one_mask_token(_lowercase )
return model_inputs
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = self.model(**_lowercase )
lowerCAmelCase_ = model_inputs["input_ids"]
return model_outputs
def __a ( self , _a , _a=5 , _a=None ) -> int:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase_ = target_ids.shape[0]
lowerCAmelCase_ = model_outputs["input_ids"][0]
lowerCAmelCase_ = model_outputs["logits"]
if self.framework == "tf":
lowerCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase_ = outputs.numpy()
lowerCAmelCase_ = outputs[0, masked_index, :]
lowerCAmelCase_ = stable_softmax(_lowercase , axis=-1 )
if target_ids is not None:
lowerCAmelCase_ = tf.gather_nd(tf.squeeze(_lowercase , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase_ = tf.expand_dims(_lowercase , 0 )
lowerCAmelCase_ = tf.math.top_k(_lowercase , k=_lowercase )
lowerCAmelCase_ , lowerCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase_ = outputs[0, masked_index, :]
lowerCAmelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase_ = probs[..., target_ids]
lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(_lowercase )
lowerCAmelCase_ = []
lowerCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase_ = target_ids[p].tolist()
lowerCAmelCase_ = p
# Filter padding out:
lowerCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase_ = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
lowerCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_lowercase )
result.append(_lowercase )
if single_mask:
return result[0]
return result
def __a ( self , _a , _a=None ) -> List[Any]:
if isinstance(_lowercase , _lowercase ):
lowerCAmelCase_ = [targets]
try:
lowerCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase_ = {}
lowerCAmelCase_ = []
for target in targets:
lowerCAmelCase_ = vocab.get(_lowercase , _lowercase )
if id_ is None:
lowerCAmelCase_ = self.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , max_length=1 , truncation=_lowercase , )["input_ids"]
if len(_lowercase ) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
"We cannot replace it with anything meaningful, ignoring it" )
continue
lowerCAmelCase_ = input_ids[0]
                # XXX: if users hit this code path, tokenizing each target becomes
                # pretty slow; the warning below lets them fix their input and
                # get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase_ = list(set(_lowercase ) )
if len(_lowercase ) == 0:
raise ValueError("At least one target must be provided when passed." )
lowerCAmelCase_ = np.array(_lowercase )
return target_ids
def __a ( self , _a=None , _a=None ) -> Optional[int]:
lowerCAmelCase_ = {}
if targets is not None:
lowerCAmelCase_ = self.get_target_ids(_lowercase , _lowercase )
lowerCAmelCase_ = target_ids
if top_k is not None:
lowerCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
lowerCAmelCase_ = super().__call__(_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
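# Usage sketch for the fill-mask pipeline above (hedged: the model name is
# illustrative; keys of each prediction match the dict built in postprocess):
#
#   from transformers import pipeline
#   fill_mask = pipeline('fill-mask', model='bert-base-uncased')
#   fill_mask('Paris is the [MASK] of France.', top_k=2)
#   # -> list of dicts with 'score', 'token', 'token_str' and 'sequence'
#   fill_mask('Paris is the [MASK] of France.', targets=['capital', 'heart'])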
| 369
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
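# Worked example: number = 97 -> int(math.sqrt(97) + 1) = 10, so the loop only
# tries i = 5 (97 % 5 == 2 and 97 % 7 == 6); no divisor is found and 97 is
# correctly reported prime.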
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 200_0000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swin'] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
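# Usage note (hedged sketch): thanks to the _LazyModule registration above,
# `from transformers.models.swin import SwinConfig` stays cheap, while accessing
# SwinModel or TFSwinModel triggers the heavy torch/TensorFlow import on first use.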
| 192
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        # make sure the custom JiebaPreTokenizer is set already in __init__
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""_tokenizer"""].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__["""_tokenizer"""].get_vocab()
        self.__dict__["""_tokenizer"""].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
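# Design note (hedged sketch, not in the original file): PreTokenizer.custom
# wrappers are not picklable, so __getstate__ swaps in a plain BertPreTokenizer
# before serialization and __setstate__ rebuilds the Jieba pre-tokenizer, e.g.
#
#   import pickle
#   tok = RoFormerTokenizerFast.from_pretrained('junnyu/roformer_chinese_base')
#   tok2 = pickle.loads(pickle.dumps(tok))  # round-trips thanks to the swap above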
| 192
| 1
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self , m , inputs: Tensor , outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self , snake_case ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    raise_if_mismatch: bool = True
    def __call__( self , x: Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                F''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transferred from={src_m} to={dest_m}''' )
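# How the transfer works (explanatory note, not in the original script): both
# networks are traced on the same dummy input, leaf modules that carry learnable
# state are collected in execution order, and their state_dicts are copied
# pairwise -- so source and destination must yield the same ordered list of
# parametrized layers, otherwise the mismatch exception above fires.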
class FakeRegNetVisslWrapper(nn.Module ):
    def __init__( self , model: nn.Module ):
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block' ), F'''Unexpected layer name {k}'''
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((F'''res{block_index}''', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward( self , x: Tensor ):
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap(dict ):
    def convert_name_to_timm( self , x ):
        x_split = x.split('-' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__( self , x ):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap(dict ):
    def __getitem__( self , x ):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys ):
    '''simple docstring'''
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'''Copied key={from_key} to={to_key}''' )
    return to_state_dict
def convert_weight_and_push( name: str , from_model_func: Callable[[], nn.Module] , our_model_func: Callable[[], nn.Module] , config: RegNetConfig , save_directory: Path , push_to_hub: bool = True , ):
    '''simple docstring'''
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x , output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=True , )
        size = 224 if 'seer' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=True , )
        print(F'''Pushed {name}''' )
def convert_weights_and_push( save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    '''simple docstring'''
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str , model_func: Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location='cpu' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
else:
for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
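# Example invocation (hedged; the script filename is illustrative, flags as
# defined above):
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted
# Caveat: `--push_to_hub` is declared with argparse type=bool, so any non-empty
# string value (including "False") parses as True; omit the flag to keep the default.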
| 367
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=2 , snake_case=99 , snake_case=0 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=2 , snake_case=4 , snake_case="last" , snake_case=True , snake_case=None , snake_case=0 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_lengths
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = gelu_activation
snake_case_ = sinusoidal_embeddings
snake_case_ = causal
snake_case_ = asm
snake_case_ = n_langs
snake_case_ = vocab_size
snake_case_ = n_special
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = summary_type
snake_case_ = use_proj
snake_case_ = scope
snake_case_ = bos_token_id
def a ( self ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_input_lengths:
snake_case_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , 2 ).float()
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , lengths=snake_case , langs=snake_case )
snake_case_ = model(snake_case , langs=snake_case )
snake_case_ = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMWithLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForQuestionAnsweringSimple(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
snake_case_ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , )
snake_case_ = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , )
((snake_case_) , ) = result_with_labels.to_tuple()
snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
((snake_case_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = self.num_labels
snake_case_ = XLMForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = self.num_choices
snake_case_ = XLMForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self ):
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Tuple = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__SCREAMING_SNAKE_CASE : int = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a ( self , snake_case , snake_case , snake_case=False ):
snake_case_ = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def a ( self ):
snake_case_ = XLMModelTester(self )
snake_case_ = ConfigTester(self , config_class=snake_case , emb_dim=37 )
def a ( self ):
self.config_tester.run_common_tests()
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_attentions in attentions] , [True] * len(snake_case ) )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case ):
# adds PAD dummy token
snake_case_ = min_length + idx + 1
snake_case_ = min_length + idx + 1
snake_case_ = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case ) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_hidden_states in hidden_states] , [True] * len(snake_case ) , )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case ):
# adds PAD dummy token
snake_case_ = min_length + idx + 1
snake_case_ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case ) , )
pass
@slow
def a ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = XLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def a ( self ):
snake_case_ = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(snake_case )
snake_case_ = torch.tensor([[14, 447]] , dtype=torch.long , device=snake_case ) # the president
snake_case_ = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
snake_case_ = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case )
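# Note on the expected output above (explanatory, not in the original test):
# with sampling disabled, generation is greedy and deterministic, and the model
# keeps repeating the prompt tokens 14, 447 ("the president"), which is exactly
# what the expected id list above encodes.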
| 200
| 0
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration( electron_conc: float , hole_conc: float , intrinsic_conc: float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
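# Worked example (mass-action law: electron_conc * hole_conc == intrinsic_conc**2):
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   returns ('intrinsic_conc', 50.0), since (25 * 100) ** 0.5 == 50.0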
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length: int = 8 ):
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl , i ):
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random( chars_incl , i ):
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
def random_number( chars_incl , i ):
    pass  # Put your code here...
def random_letters( chars_incl , i ):
    pass  # Put your code here...
def random_characters( chars_incl , i ):
    pass  # Put your code here...
def is_strong_password( password: str , min_length: int = 8 ):
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
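# e.g. is_strong_password('Hwea7$2!') -> True (upper, lower, digit and symbol),
# while is_strong_password('password') -> False.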
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    length = int(input('Please indicate the max length of your password: ' ).strip() )
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(length ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(chars_incl , length ) , )
    print('[If you are thinking of using this password, you had better save it.]' )
if __name__ == "__main__":
main()
| 78
| 1
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = """file"""
@pytest.fixture(scope="""session""" )
def zstd_path( tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
    data = bytes(FILE_CONTENT , """utf-8""" )
    with zstd.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    """simple docstring"""
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , """w""" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
UpperCAmelCase = input_paths[compression_format]
UpperCAmelCase = tmp_path / """cache"""
UpperCAmelCase = DownloadConfig(cache_dir=_snake_case , extract_compressed_file=_snake_case )
UpperCAmelCase = cached_path(_snake_case , download_config=_snake_case )
with open(_snake_case ) as f:
UpperCAmelCase = f.read()
with open(_snake_case ) as f:
UpperCAmelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = """custom_cache"""
UpperCAmelCase = """custom_extracted_dir"""
UpperCAmelCase = tmp_path / """custom_extracted_path"""
if default_extracted:
UpperCAmelCase = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _snake_case )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_snake_case ) )
UpperCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
UpperCAmelCase = xz_file
UpperCAmelCase = (
DownloadConfig(extract_compressed_file=_snake_case )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_snake_case )
)
UpperCAmelCase = cached_path(_snake_case , download_config=_snake_case )
assert Path(_snake_case ).parent.parts[-2:] == expected
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = str(Path(_snake_case ).resolve() )
assert cached_path(_snake_case ) == text_file
# relative path
UpperCAmelCase = str(Path(_snake_case ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_snake_case ) == text_file
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(_snake_case ):
cached_path(_snake_case )
# relative path
UpperCAmelCase = """./__missing_file__.txt"""
with pytest.raises(_snake_case ):
cached_path(_snake_case )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(_snake_case ) as f:
UpperCAmelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _snake_case )
def _a ( ):
"""simple docstring"""
with pytest.raises(_snake_case ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _snake_case )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_snake_case ):
http_get("""https://huggingface.co""" , temp_file=_snake_case )
with pytest.raises(_snake_case ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _snake_case )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_snake_case ):
ftp_get("""ftp://huggingface.co""" , temp_file=_snake_case )
with pytest.raises(_snake_case ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _snake_case )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_snake_case ):
fsspec_get("""s3://huggingface.co""" , temp_file=_snake_case )
with pytest.raises(_snake_case ):
fsspec_head("""s3://huggingface.co""" )
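def _example_offline_mode():
    # Hedged sketch, not one of the original tests (pytest will not collect a
    # leading-underscore helper): with offline mode switched on, cached_path
    # fails fast with OfflineModeIsEnabled instead of touching the network.
    import datasets.config
    datasets.config.HF_DATASETS_OFFLINE = True
    try:
        cached_path("https://huggingface.co")
    except OfflineModeIsEnabled:
        return True
    finally:
        datasets.config.HF_DATASETS_OFFLINE = False
    return False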
| 234
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = """▁"""
_UpperCamelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCamelCase = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
_UpperCamelCase = {
"""google/pegasus-xsum""": 512,
}
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
def __init__( self ,A=None ,A=None ,A="<pad>" ,A="</s>" ,A="<unk>" ,A="<mask_2>" ,A="<mask_1>" ,A=None ,A=103 ,**A ,):
UpperCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(A ,A ):
raise TypeError(
F'''additional_special_tokens should be of type {type(A )}, but is'''
F''' {type(A )}''' )
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(A ) ,self.offset - 1 )
]
if len(set(A ) ) != len(A ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCAmelCase = additional_special_tokens_extended
else:
UpperCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 ,self.offset )]
super().__init__(
A ,tokenizer_file=A ,pad_token=A ,eos_token=A ,unk_token=A ,mask_token=A ,mask_token_sent=A ,offset=A ,additional_special_tokens=A ,**A ,)
UpperCAmelCase = vocab_file
UpperCAmelCase = False if not self.vocab_file else True
def _UpperCamelCase ( self ,A ):
all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def _UpperCamelCase ( self ,A ,A = None ,A = False ):
if already_has_special_tokens:
return self._special_token_mask(A )
elif token_ids_a is None:
return self._special_token_mask(A ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _UpperCamelCase ( self ,A ,A=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self ,A ,A = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file ,A )
return (out_vocab_file,)
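# Hedged usage sketch: the class above corresponds to transformers'
# PegasusTokenizerFast, and google/pegasus-xsum is a real checkpoint; the
# call below is illustrative (it downloads the tokenizer files).
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("Sketching mountains at dawn.")["input_ids"]
#     assert ids[-1] == tok.eos_token_id  # a single trailing EOS is appended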
| 234
| 1
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
_SCREAMING_SNAKE_CASE = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
_SCREAMING_SNAKE_CASE = "▁"
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase=100 , _lowerCAmelCase=None , _lowerCAmelCase = None , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase = [f'''<extra_id_{i}>''' for i in range(_lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase = len(set(filter(lambda _lowerCAmelCase : bool("extra_id" in str(_lowerCAmelCase ) ) , _lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_lowerCAmelCase = legacy
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , extra_ids=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_lowerCAmelCase , **_lowerCAmelCase , )
_lowerCAmelCase = vocab_file
_lowerCAmelCase = extra_ids
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@staticmethod
def _snake_case ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , _lowerCAmelCase , )
return max_model_length
@property
def _snake_case ( self ) -> Tuple:
return self.sp_model.get_piece_size() + self._extra_ids
def _snake_case ( self ) -> List[str]:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + [1]
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1]
def _snake_case ( self ) -> Union[str, Any]:
return list(
set(filter(lambda _lowerCAmelCase : bool(re.search(r"<extra_id_\d+>" , _lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def _snake_case ( self ) -> List[Any]:
return [self._convert_token_to_id(_lowerCAmelCase ) for token in self.get_sentinel_tokens()]
def _snake_case ( self , _lowerCAmelCase ) -> List[int]:
if len(_lowerCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = self._add_eos_if_not_present(_lowerCAmelCase )
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase = self._add_eos_if_not_present(_lowerCAmelCase )
return token_ids_a + token_ids_a
def __getstate__( self ) -> Tuple:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> str:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , _lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_lowerCAmelCase = SPIECE_UNDERLINE + text.replace(_lowerCAmelCase , " " )
return super().tokenize(_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
if not self.legacy:
_lowerCAmelCase = text.startswith(_lowerCAmelCase )
if is_first:
_lowerCAmelCase = text[1:]
_lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(_lowerCAmelCase ):
_lowerCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _snake_case ( self , _lowerCAmelCase ) -> Dict:
if token.startswith("<extra_id_" ):
_lowerCAmelCase = re.match(r"<extra_id_(\d+)>" , _lowerCAmelCase )
_lowerCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> int:
if index < self.sp_model.get_piece_size():
_lowerCAmelCase = self.sp_model.IdToPiece(_lowerCAmelCase )
else:
_lowerCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def _snake_case ( self , _lowerCAmelCase ) -> int:
_lowerCAmelCase = []
_lowerCAmelCase = ""
_lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
_lowerCAmelCase = True
_lowerCAmelCase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
_lowerCAmelCase = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase = os.path.join(
_lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , "wb" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
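# Hedged sketch of the sentinel mapping implemented in the id-conversion
# methods above: extra ids sit at the top of the vocabulary, so <extra_id_N>
# maps to vocab_size - N - 1. The size below is illustrative (t5-small:
# 32000 SentencePiece pieces + 100 extra ids).
if __name__ == "__main__":
    vocab_size = 32100
    for n in (0, 1, 99):
        print(f"<extra_id_{n}> -> {vocab_size - n - 1}")
    # <extra_id_0> -> 32099, <extra_id_1> -> 32098, <extra_id_99> -> 32000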
| 158
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
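# Illustrative invocation (paths are placeholders; in transformers this file
# ships as convert_funnel_original_tf_checkpoint_to_pytorch.py):
#
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin \
#         --base_model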
| 158
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,  # use the computed decoder mask
    }
class FlaxBlenderbotSmallModelTester:
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.0_2 , ):
__a : Dict = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : int = is_training
__a : Optional[int] = use_labels
__a : Any = vocab_size
__a : str = hidden_size
__a : Any = num_hidden_layers
__a : Any = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : List[str] = hidden_act
__a : Any = hidden_dropout_prob
__a : Optional[int] = attention_probs_dropout_prob
__a : Dict = max_position_embeddings
__a : str = eos_token_id
__a : Optional[int] = pad_token_id
__a : List[Any] = bos_token_id
__a : Tuple = initializer_range
def _lowerCamelCase ( self ):
__a : Tuple = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__a : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__a : int = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
__a : str = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _lowerCamelCase ( self ):
__a , __a : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = 20
__a : List[Any] = model_class_name(_UpperCAmelCase )
__a : Any = model.encode(inputs_dict['''input_ids'''] )
__a , __a : List[Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__a : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : Dict = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : str = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
__a : Union[str, Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = 20
__a : Optional[Any] = model_class_name(_UpperCAmelCase )
__a : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
__a , __a : Tuple = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__a : Any = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : int = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
__a : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = 99
def _lowerCamelCase ( self ):
__a : List[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__a : Dict = input_ids.shape[0]
__a : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self ):
__a , __a , __a : Tuple = self._get_config_and_data()
__a : List[Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Tuple = lm_model(input_ids=_UpperCAmelCase )
__a : List[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__a : Optional[Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Optional[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__a : Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__a : Optional[int] = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__a : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__a : Union[str, Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Any = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
__a : Tuple = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__lowerCAmelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _lowerCamelCase ( self ):
__a : Any = FlaxBlenderbotSmallModelTester(self )
def _lowerCamelCase ( self ):
__a , __a : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Optional[int] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('''JIT Enabled''' ):
__a : List[Any] = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : str = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ):
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Dict = model_class(_UpperCAmelCase )
__a : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__a : List[str] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('''JIT Enabled''' ):
__a : Tuple = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : Any = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : Tuple = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__a : Dict = np.ones((1, 1) ) * model.config.eos_token_id
__a : Union[str, Any] = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
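# Hedged sketch of shift_tokens_right, which the shift test above pins down:
# the decoder input is the target shifted one position right, with the
# decoder-start id in column 0; any -100 label markers are replaced by the
# pad id. This is an illustrative numpy re-implementation, not the library's
# exact code.
def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # drop the last token, shift the rest right
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)
if __name__ == "__main__":
    demo = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
    print(_shift_tokens_right_sketch(demo, 1, 2))  # [[ 2 71 82 18 33  2  1]]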
| 188
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _lowerCamelCase ( self , _UpperCAmelCase=0 ):
__a : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(_UpperCAmelCase ) )
__a : Any = np.random.RandomState(_UpperCAmelCase )
__a : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Dict = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__a : List[Any] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# warmup pass to apply optimizations
__a : Any = pipe(**self.get_dummy_inputs() )
__a : List[str] = self.get_dummy_inputs()
__a : Tuple = pipe(**_UpperCAmelCase ).images
__a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : int = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[Any] = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs()
__a : str = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self ):
__a : Optional[Any] = ort.SessionOptions()
__a : Any = False
return options
def _lowerCamelCase ( self ):
__a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Tuple = '''A fantasy landscape, trending on artstation'''
__a : Tuple = np.random.RandomState(0 )
__a : int = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : List[Any] = output.images
__a : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Any = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCamelCase ( self ):
__a : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
__a : str = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__a : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[str] = '''A fantasy landscape, trending on artstation'''
__a : str = np.random.RandomState(0 )
__a : str = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : Dict = output.images
__a : List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Dict = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
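# Hedged sketch of the scheduler-swap pattern the tests above repeat: any
# diffusers scheduler can be rebuilt from another's config, so a pipeline can
# change sampling algorithms without reloading model weights. The pipeline
# class is exported as OnnxStableDiffusionImg2ImgPipeline in diffusers; the
# call below is illustrative.
#
#     pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#         "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#         provider="CPUExecutionProvider",
#     )
#     pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)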
| 188
| 1
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        '''simple docstring'''
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        '''simple docstring'''
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
    def __repr__(self) -> str:
        '''simple docstring'''
        return pformat(self.adj_list)
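# Hedged usage sketch exercising both branches documented above; the printed
# dictionaries are the adjacency lists produced by the chained add_edge calls.
if __name__ == "__main__":
    undirected_graph = GraphAdjacencyList[int](directed=False)
    undirected_graph.add_edge(1, 2).add_edge(2, 3)
    print(undirected_graph)  # {1: [2], 2: [1, 3], 3: [2]}
    directed_graph = GraphAdjacencyList[str]()  # directed by default
    directed_graph.add_edge("a", "b").add_edge("b", "c")
    print(directed_graph)  # {'a': ['b'], 'b': ['c'], 'c': []}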
| 85
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__SCREAMING_SNAKE_CASE :Optional[int] = True
except (ImportError, ModuleNotFoundError):
__SCREAMING_SNAKE_CASE :str = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub("<n>", "", x)  # remove pegasus newline char; keep the result (re.sub is not in-place)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
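# Hedged usage sketch (illustrative strings): the helper emits one sentence
# per line, the format sentence-level ROUGE scoring expects.
if __name__ == "__main__" and NLTK_AVAILABLE:
    print(add_newline_to_end_of_each_sentence("Pegasus is mythical. It is a flying horse."))
    # Pegasus is mythical.
    # It is a flying horse.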
| 22
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A ( __snake_case ):
__magic_name__ = '''facebook/bart-large-mnli'''
__magic_name__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
__magic_name__ = '''text_classifier'''
__magic_name__ = AutoTokenizer
__magic_name__ = AutoModelForSequenceClassification
__magic_name__ = ['''text''', ['''text''']]
__magic_name__ = ['''text''']
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
super().setup()
A : Optional[int] = self.model.config
A : str = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
A : Tuple = int(SCREAMING_SNAKE_CASE )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : List[str] = labels
return self.pre_processor(
[text] * len(SCREAMING_SNAKE_CASE ) , [F'This example is {label}' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : str = outputs.logits
A : str = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
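# Hedged usage sketch (illustrative call): in transformers this tool is
# exported as TextClassificationTool. It scores one "This example is {label}"
# hypothesis per candidate label with the NLI model and returns the label
# whose entailment logit is highest.
#
#     tool = TextClassificationTool()
#     print(tool("This is a super nice API!", labels=["positive", "negative"]))
#     # -> "positive"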
| 311
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product = "laptop" ):
'''simple docstring'''
A : Tuple = F'https://www.amazon.in/laptop/s?k={product}'
A : Optional[int] = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text )
# Initialize a Pandas dataframe with the column titles
A : List[str] = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
A : Optional[Any] = item.ha.text
A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href''']
A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
A : Optional[int] = '''Not available'''
try:
A : str = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
A : List[Any] = ''''''
try:
A : Dict = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
A : str = float('''nan''' )
except AttributeError:
pass
A : Union[str, Any] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A : List[str] = ''' '''
A : Optional[Any] = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase : Union[str, Any] = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
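# Quick check of the discount arithmetic above (illustrative numbers):
# discount % = (MRP - price) / MRP * 100, so an MRP of ₹1,000 at a price of
# ₹750 gives (1000 - 750) / 1000 * 100 = 25.0.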
| 311
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Dict = {'vocab_file': 'spiece.model'}
a : List[Any] = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
a : Optional[Any] = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
a : Dict = 0
a : Dict = 1
a : str = 2
a : str = 3
a : int = 4
class UpperCamelCase_ ( _snake_case ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = """left"""
def __init__( self , A , A=False , A=True , A=False , A="<s>" , A="</s>" , A="<unk>" , A="<sep>" , A="<pad>" , A="<cls>" , A="<mask>" , A=["<eop>", "<eod>"] , A = None , **A , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Union[str, Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
UpperCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
UpperCAmelCase : Union[str, Any] = 3
UpperCAmelCase : Dict = do_lower_case
UpperCAmelCase : Tuple = remove_space
UpperCAmelCase : List[str] = keep_accents
UpperCAmelCase : int = vocab_file
UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
@property
def _lowercase( self ) -> Optional[int]:
return len(self.sp_model )
def _lowercase( self ) -> str:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> int:
UpperCAmelCase : str = self.__dict__.copy()
UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self , A ) -> List[str]:
UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : str = {}
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase( self , A ) -> Any:
if self.remove_space:
UpperCAmelCase : int = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase : Optional[Any] = inputs
UpperCAmelCase : List[Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
UpperCAmelCase : Any = unicodedata.normalize("""NFKD""" , __snake_case )
UpperCAmelCase : Tuple = """""".join([c for c in outputs if not unicodedata.combining(__snake_case )] )
if self.do_lower_case:
UpperCAmelCase : Dict = outputs.lower()
return outputs
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.preprocess_text(__snake_case )
UpperCAmelCase : str = self.sp_model.encode(__snake_case , out_type=__snake_case )
UpperCAmelCase : Optional[Any] = []
for piece in pieces:
if len(__snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCAmelCase : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(__snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase : Dict = cur_pieces[1:]
else:
UpperCAmelCase : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__snake_case )
else:
new_pieces.append(__snake_case )
return new_pieces
def _lowercase( self , A ) -> Optional[Any]:
return self.sp_model.PieceToId(__snake_case )
def _lowercase( self , A ) -> int:
return self.sp_model.IdToPiece(__snake_case )
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : Optional[int] = """""".join(__snake_case ).replace(__snake_case , """ """ ).strip()
return out_string
def _lowercase( self , A , A = False , A = None , A = True , **A , ) -> List[str]:
UpperCAmelCase : List[str] = kwargs.pop("""use_source_tokenizer""" , __snake_case )
UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(__snake_case , skip_special_tokens=__snake_case )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__snake_case ) )
UpperCAmelCase : List[Any] = []
sub_texts.append(__snake_case )
else:
current_sub_text.append(__snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase : Dict = """""".join(__snake_case )
UpperCAmelCase : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase : Any = self.clean_up_tokenization(__snake_case )
return clean_text
else:
return text
def _lowercase( self , A , A = None ) -> Tuple:
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase( self , A , A = None , A = False ) -> int:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1, 1]
return ([0] * len(__snake_case )) + [1, 1]
def _lowercase( self , A , A = None ) -> Tuple:
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase( self , A , A = None ) -> Dict:
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : int = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
UpperCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
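# Hedged sketch of create_token_type_ids_from_sequences above (illustrative
# lengths): sequence A and its <sep> get segment id 0, sequence B and its
# <sep> get segment id 1, and the trailing <cls> gets segment id 2 -- unlike
# BERT, XLNet places <cls> at the end.
if __name__ == "__main__":
    len_a, len_b = 3, 2
    token_type_ids = (len_a + 1) * [0] + (len_b + 1) * [1] + [2]
    assert token_type_ids == [0, 0, 0, 0, 1, 1, 1, 2]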
| 265
|
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
    """simple docstring"""
    try:
        with open(model_file , """rb""" ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("""version""" ):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = """"""
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""" , """.0""" )
                    .replace("""_1""" , """.1""" )
                    .replace("""_2""" , """.2""" )
                    .replace("""_3""" , """.3""" )
                    .replace("""_4""" , """.4""" )
                    .replace("""_5""" , """.5""" )
                    .replace("""_6""" , """.6""" )
                    .replace("""_7""" , """.7""" )
                    .replace("""_8""" , """.8""" )
                    .replace("""_9""" , """.9""" )
                )
        flax_key = """.""".join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            """ use it for predictions and inference.""" )
    return pt_model
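# Hedged usage sketch (added; the model class and file name below are
# assumptions for illustration, not part of the original module):
#
#     pt_model = SomeTorchModel(config)   # any torch.nn.Module with matching keys
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")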
| 200
| 0
|
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( t5x_checkpoint_path , config_name , flax_dump_folder_path ) -> str:
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(t5x_checkpoint_path )
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
UpperCAmelCase_ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCAmelCase_ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
UpperCAmelCase_ = f'layers_{str(__UpperCamelCase )}'
# Self-Attention
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCAmelCase_ = flax_model.params['''encoder''']['''block'''][str(__UpperCamelCase )]['''layer''']
UpperCAmelCase_ = tax_attention_key
UpperCAmelCase_ = tax_attention_out
UpperCAmelCase_ = tax_attention_query
UpperCAmelCase_ = tax_attention_value
UpperCAmelCase_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_global_layer_norm
if split_mlp_wi:
UpperCAmelCase_ = tax_mlp_wi_a
UpperCAmelCase_ = tax_mlp_wi_a
else:
UpperCAmelCase_ = tax_mlp_wi
UpperCAmelCase_ = tax_mlp_wo
UpperCAmelCase_ = tax_mlp_layer_norm
UpperCAmelCase_ = flax_model_encoder_layer_block
# Only for layer 0:
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_encoder_global_rel_embedding
# Assigning
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
UpperCAmelCase_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
UpperCAmelCase_ = f'layers_{str(__UpperCamelCase )}'
# Self-Attention
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''key''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''out''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''query''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCAmelCase_ = flax_model.params['''decoder''']['''block'''][str(__UpperCamelCase )]['''layer''']
UpperCAmelCase_ = tax_attention_key
UpperCAmelCase_ = tax_attention_out
UpperCAmelCase_ = tax_attention_query
UpperCAmelCase_ = tax_attention_value
UpperCAmelCase_ = tax_pre_attention_layer_norm
UpperCAmelCase_ = tax_enc_dec_attention_key
UpperCAmelCase_ = tax_enc_dec_attention_out
UpperCAmelCase_ = tax_enc_dec_attention_query
UpperCAmelCase_ = tax_enc_dec_attention_value
UpperCAmelCase_ = tax_cross_layer_norm
if split_mlp_wi:
UpperCAmelCase_ = tax_mlp_wi_a
UpperCAmelCase_ = tax_mlp_wi_a
else:
UpperCAmelCase_ = tax_mlp_wi
UpperCAmelCase_ = tax_mlp_wo
UpperCAmelCase_ = txa_mlp_layer_norm
UpperCAmelCase_ = flax_model_decoder_layer_block
# Decoder Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
UpperCAmelCase_ = txa_decoder_norm
# Only for layer 0:
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_decoder_rel_embedding
# Token Embeddings
UpperCAmelCase_ = tax_model['''target''']['''token_embedder''']['''embedding''']
UpperCAmelCase_ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path )
    print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_lowerCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
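# Example invocation (a sketch; the script name and all paths are
# placeholders, not files that are known to exist):
#
#     python convert_t5x_checkpoint_to_flax.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_name google/long-t5-local-base \
#         --flax_dump_folder_path ./flax_dump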
| 177
|
import numpy as np
def SCREAMING_SNAKE_CASE ( vector : np.array ) -> np.array:
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
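    # Added sanity check (a sketch, not in the original file): the logistic
    # identity (2 / (1 + e^(-2x))) - 1 is algebraically equal to tanh(x), so
    # the function above should agree with NumPy's built-in implementation.
    xs = np.linspace(-3, 3, 13)
    assert np.allclose(SCREAMING_SNAKE_CASE(xs), np.tanh(xs))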
| 177
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) ->Optional[int]:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 1_00 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ) ->Union[AudioPipelineOutput, Tuple]:
        '''simple docstring'''
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                " process." )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
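# Hedged usage sketch (added; the checkpoint name is an assumption -- any
# unconditional audio diffusion checkpoint compatible with this pipeline works):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     output = pipe(batch_size=1, num_inference_steps=1_00)
#     waveform = output.audios[0]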
| 234
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) ->Tuple:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) ->List[Any]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) ->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
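# Hedged usage sketch (added; not part of the original file): encode a
# sentence pair with the fast tokenizer defined above.
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     enc = tok("a first sentence", "a second sentence")
#     print(enc["input_ids"], enc["token_type_ids"])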
| 234
| 1
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix , constant_matrix , init_val , iterations , ):
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = F"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table ):
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
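    # Worked example (an added sketch, not in the original file): a strictly
    # diagonally dominant 2x2 system; Jacobi iteration converges towards the
    # exact solution x = 1/11, y = 7/11.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 25))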
| 280
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase__ = 3
def primitive_root( p_val ):
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size ):
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> has to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name , key_size ):
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print('\nWARNING:' )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , 'w' ) as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , 'w' ) as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""" )
def main():
    print('Making key files...' )
    make_key_files('elgamal' , 20_48 )
    print('Key files generation successful' )
if __name__ == "__main__":
main()
| 280
| 1
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 188
|
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : str ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale with the ITU-R 601-2 luma transform."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary (boolean) image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image by a structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image into the center of the padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over the image and apply the kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
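    # Added sanity check (illustrative, not from the original script): with the
    # cross-shaped structuring element above, a single foreground pixel should
    # dilate into a plus shape.
    dot = np.zeros((5, 5))
    dot[2, 2] = 1
    print(dilation(dot, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])))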
def binary_multiply(a: int, b: int) -> int:
    """Multiply two non-negative integers via shift-and-add (Russian peasant
    multiplication). Function names are reconstructed; the originals were mangled.
    """
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c via shift-and-add, keeping the accumulator reduced modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
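if __name__ == "__main__":
    # Minimal usage check (added for illustration; not in the original module):
    # 5 * 7 = 35 by shift-and-add, and 35 mod 6 = 5.
    print(binary_multiply(5, 7))  # 35
    print(binary_mod_multiply(5, 7, 6))  # 5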
"""Tests for the TensorFlow ConvBERT model."""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
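# Added note: assuming this module lives at tests/models/convbert/test_modeling_tf_convbert.py
# in the transformers repository, the non-slow tests can be run with:
#   python -m pytest tests/models/convbert/test_modeling_tf_convbert.py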
"""Convert MusicGen checkpoints from the original repository."""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq (Audiocraft) parameter name onto the HF MusicGen naming scheme."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict to HF names and split it into the decoder (LM)
    state dict and the encoder-decoder projection state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
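# Added usage note (script name illustrative): assuming this file is saved as
# convert_musicgen_transformers.py, a typical conversion run looks like:
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small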
def simplify(current_set):
    """Divide each row by its leading term, subtract rows to eliminate the first
    column, and recurse on the remaining sub-system. Names are reconstructed;
    the original identifiers were mangled.
    """
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(next_iteration)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, final_set[0])
        final_set = resultant
    return final_set
def solve_simultaneous(equations):
    """Solve n simultaneous linear equations, each given as a list of n + 1 numbers
    (coefficients followed by the constant term). Returns the solutions as floats.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
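    # Added note (values computed by hand, not from the original source): the 5x5
    # system above should print [-1.0, 0.0, 1.0, 2.0, 3.0], and [[4, 2]] prints [0.5].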
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum all amicable numbers below `limit` (numbers whose proper-divisor sums
    map to each other without being equal)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
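    # Added note: this is Project Euler problem 21 (amicable numbers); for the
    # default limit of 10000 the expected sum is 31626.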
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
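# Added usage note (file name assumed): saved as evaluate_v2.py, a typical run is:
#   python evaluate_v2.py dev-v2.0.json predictions.json --na-prob-file na_probs.json -o eval.json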
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
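# Added usage note (script name and paths illustrative):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin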
"""Stable Diffusion text-to-image inference on CPU, optimized with Intel Extension for PyTorch (IPEX)."""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
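# Added note: the bfloat16 paths above assume a CPU with native bf16 support
# (e.g. AVX-512 BF16 or AMX); on other CPUs the run may be considerably slower
# or unsupported.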
"""Testing suite for the PyTorch Falcon model."""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_cache_conversion(self):
        # (method name reconstructed; the original identifier was mangled)
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCAmelCase = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
lowerCAmelCase = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=1_9 )
lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(device=UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# Test results are the same with and without cache
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
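# A hedged illustration of the cache layouts checked in test_cache_conversion above.
# The shapes are inferred from the ndim assertions; the names here are illustrative,
# not taken from the modeling code. The RW-format cache fuses batch and head dims
# into one 3-D tensor per key/value, while the standard format keeps a 4-D
# (batch, num_heads, seq_len, head_dim) tensor; a pure reshape converts between them
# without touching any element, which is why exact elementwise equality can be required:
#
#     standard = torch.randn(batch, num_heads, seq_len, head_dim)            # 4-D layout
#     rw = standard.reshape(batch * num_heads, seq_len, head_dim)            # 3-D layout
#     assert torch.equal(rw.reshape(batch, num_heads, seq_len, head_dim), standard)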
| 55
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
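# Hedged usage sketch (illustrative, not part of this module): with the _LazyModule
# registration above, importing the package is cheap, and the heavy torch/TF
# submodules are only loaded when one of the exported names is first accessed:
#
#     from transformers import ViTMAEConfig, ViTMAEModel   # names resolved lazily
#     model = ViTMAEModel(ViTMAEConfig())                   # triggers the real import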
| 280
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that still fits."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
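# Worked example (greedy choice, matching the loop above): making change for 987 with
# the default denominations picks the largest note that still fits at each step:
#
#     find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#     -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
#
# Greedy is optimal for this canonical coin system, but not for arbitrary ones:
# with denominations [1, 3, 4] and value 6, greedy returns [4, 1, 1] while [3, 3] is optimal.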
| 280
| 1
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase__ : Optional[int] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 262
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
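# Note on the fixtures above: fast pipeline tests deliberately use tiny random
# components, only 2 denoising steps, and small 16x16/32x32 tensors so the full
# text-guided inpainting + super-resolution path runs in seconds on CPU. The
# numerical tolerances (1e-1 to 1e-3) account for fp16 save/load and attention-backend
# noise, not model quality.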
| 262
| 1
|
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image by the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 14
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 14
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Remap legacy "no_*" flags onto their positive counterparts before the
        # dataclass machinery sees them.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
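# Hedged usage sketch (the model list is illustrative; `models` is assumed to be a
# field of the BenchmarkArguments base class). The legacy "no_*" flags are remapped
# by __init__ above, so no_cuda=True arrives as cuda=False:
#
#     args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#     args.strategy  # -> a OneDeviceStrategy pinned to /cpu:0 on a GPU-less host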
| 92
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Dict = ["flax"]
def __init__( self , *a__ , **a__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Dict = ["flax"]
def __init__( self , *a__ , **a__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Optional[Any] = ["flax"]
def __init__( self , *a__ , **a__ ) -> str:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Optional[Any] = ["flax"]
def __init__( self , *a__ , **a__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : List[str] = ["flax"]
def __init__( self , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : int = ["flax"]
def __init__( self , *a__ , **a__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Optional[Any] = ["flax"]
def __init__( self , *a__ , **a__ ) -> Any:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Dict = ["flax"]
def __init__( self , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : List[str] = ["flax"]
def __init__( self , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : int = ["flax"]
def __init__( self , *a__ , **a__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Any = ["flax"]
def __init__( self , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : str = ["flax"]
def __init__( self , *a__ , **a__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _snake_case ( metaclass=lowercase_ ):
lowerCAmelCase_ : Tuple = ["flax"]
def __init__( self , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
| 92
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that prints decoded text to stdout as soon as whole words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Decodes the new tokens and prints any text that is safe to display."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining token cache and signals the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores finalized text in a queue, to be consumed by an iterator."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: float = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
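# Hedged usage sketch (standard streaming pattern; the checkpoint name is illustrative):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tokenizer(["A short story:"], return_tensors="pt")
#     streamer = TextStreamer(tokenizer, skip_prompt=True)
#     model.generate(**inputs, streamer=streamer, max_new_tokens=20)  # prints as it decodes
#
# TextIteratorStreamer instead pushes text chunks onto a queue, so generation can run in
# a background thread while the main thread consumes `for chunk in streamer: ...`.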
| 197
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
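# Hedged CLI sketch (the script file name is assumed; the output path is illustrative):
#
#     python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny-hf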
| 2
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__: Union[str, Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase__: str = self.processor.tokenajson(_UpperCAmelCase )
self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
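# Hedged micro-example of the same conversion: each <s_key>...</s_key> span becomes a
# dict entry, and repeated groups separated by <sep/> become a list, e.g.
#
#     processor.token2json("<s_city>Atlanta</s_city>")  # -> {"city": "Atlanta"}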
| 2
| 1
|
'''simple docstring'''
class snake_case :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = {}
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if vertex not in self.adjacency:
lowerCamelCase_ = {}
self.num_vertices += 1
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
self.add_vertex(UpperCamelCase )
self.add_vertex(UpperCamelCase )
if head == tail:
return
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.get_edges()
for edge in edges:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for i in range(len(UpperCamelCase ) ):
lowerCamelCase_ = list(edges[i] )
edges.sort(key=lambda UpperCamelCase : e[2] )
for i in range(len(UpperCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase_ = edges[i][2] + 1
for edge in edges:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = edge
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def __str__( self ):
"""simple docstring"""
lowerCamelCase_ = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase_ = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def snake_case ( self ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def snake_case ( UpperCamelCase=None , UpperCamelCase=None ):
"""simple docstring"""
lowerCamelCase_ = Graph()
if vertices is None:
lowerCamelCase_ = []
if edges is None:
lowerCamelCase_ = []
for vertex in vertices:
g.add_vertex(UpperCamelCase )
for edge in edges:
g.add_edge(*UpperCamelCase )
return g
class snake_case :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCamelCase_ = {}
lowerCamelCase_ = {}
def __len__( self ):
"""simple docstring"""
return len(self.parent )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if item in self.parent:
return self.find(UpperCamelCase )
lowerCamelCase_ = item
lowerCamelCase_ = 0
return item
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if item not in self.parent:
return self.make_set(UpperCamelCase )
if item != self.parent[item]:
lowerCamelCase_ = self.find(self.parent[item] )
return self.parent[item]
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.find(UpperCamelCase )
lowerCamelCase_ = self.find(UpperCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase_ = roota
return roota
return None
@staticmethod
def snake_case ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = graph.num_vertices
lowerCamelCase_ = Graph.UnionFind()
lowerCamelCase_ = []
while num_components > 1:
lowerCamelCase_ = {}
for vertex in graph.get_vertices():
lowerCamelCase_ = -1
lowerCamelCase_ = graph.get_edges()
for edge in edges:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = edge
lowerCamelCase_ = union_find.find(UpperCamelCase )
lowerCamelCase_ = union_find.find(UpperCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = cheap_edge[vertex]
if union_find.find(UpperCamelCase ) != union_find.find(UpperCamelCase ):
union_find.union(UpperCamelCase , UpperCamelCase )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase_ = num_components - 1
lowerCamelCase_ = Graph.build(edges=UpperCamelCase )
return mst
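# Note on the routine above (Boruvka's algorithm): every round, each connected
# component records its cheapest outgoing edge and all of those edges are added at
# once, so the number of components at least halves per round; with E edges and V
# vertices this gives O(log V) rounds and O(E log V) total work.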
| 55
|
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase = 13 , UpperCamelCase = 64 , UpperCamelCase = 2 , UpperCamelCase = 3 , UpperCamelCase = 3 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = 128 , UpperCamelCase=[16, 32, 64, 128] , UpperCamelCase = 7 , UpperCamelCase = 4 , UpperCamelCase = 37 , UpperCamelCase = "gelu" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 10 , UpperCamelCase = 0.02 , UpperCamelCase = 2 , UpperCamelCase = 1 , UpperCamelCase = 128 , UpperCamelCase = [2, 2, 2, 2] , UpperCamelCase = 2 , UpperCamelCase = 2 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = num_attention_outputs
lowerCamelCase_ = embed_dim
lowerCamelCase_ = embed_dim + 1
lowerCamelCase_ = resolution
lowerCamelCase_ = depths
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = dim
lowerCamelCase_ = mlp_expansion_ratio
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerModel(config=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase )
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerModelTester(self )
lowerCamelCase_ = ConfigTester(
self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCamelCase_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCamelCase_ = seq_length * self.model_tester.chunk_length
else:
lowerCamelCase_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCamelCase_ = outputs.decoder_hidden_states
self.assertIsInstance(UpperCamelCase , (list, tuple) )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEfficientFormerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "chunk_length" , UpperCamelCase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCamelCase_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def snake_case ( self ):
"""simple docstring"""
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCamelCase_ = model_class(UpperCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCamelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCamelCase_ = model(UpperCamelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
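

# Added note (not in the original file): at inference time the `...WithTeacher` variant
# reports DeiT-style distillation logits, i.e. the average of the classification head and
# the distillation head. A minimal sketch of that combination:
def _average_teacher_logits(cls_logits, distillation_logits):
    return (cls_logits + distillation_logits) / 2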
| 55
| 1
|
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (matching_string, remaining_prefix, remaining_word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words in the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Returns True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Deletes a word from the tree if it exists; returns True on success."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
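

# Added example (not part of the original module): how `match` drives an insert. Inserting
# "bandana" into a tree that already contains "banana" splits the shared prefix "ban" into
# its own internal node with children "ana" and "dana".
def _prefix_split_example() -> None:
    root = RadixNode()
    root.insert_many(["banana", "bandana"])
    assert root.find("banana") and root.find("bandana")
    assert not root.find("ban")  # "ban" is an internal split node, not an inserted word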
| 190
|
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
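

# Added illustration (not in the original tests): for mBART, `shift_tokens_right` moves the
# final non-pad token (the language code) to position 0 so the decoder starts from it.
# Token ids below are made up; pad_token_id is assumed to be 1.
def _shift_tokens_right_example():
    import torch

    labels = torch.tensor([[64, 27, 2, 250004]])  # "... <eos> <lang_code>"
    decoder_input_ids = shift_tokens_right(labels, 1)  # -> [[250004, 64, 27, 2]]
    assert decoder_input_ids[0, 0].item() == 250004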
| 190
| 1
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to the range [a, b] in O(lg n) via lazy propagation."""
        # push any pending lazy assignment down before touching this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the max of the range [a, b] in O(lg n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
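

# Added sanity sketch (not in the original module): lazy propagation should agree with a
# brute-force range-assign / range-max over a plain list.
def _brute_force_check(values: list[int]) -> None:
    tree = SegmentTree(len(values))
    tree.build(1, 1, len(values), values)
    values[3:6] = [111, 111, 111]  # mirrors update(1, 1, n, 4, 6, 111) on 1-indexed positions
    tree.update(1, 1, len(values), 4, 6, 111)
    assert tree.query(1, 1, len(values), 1, len(values)) == max(values)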
| 262
|
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    status = True

    # 0 and 1 are not primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # to False and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variables for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must be from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must be from type int and positive"

    return ans


def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans


def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (number > 1), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
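

# Added quick self-checks (not part of the original library): a few known values that pin
# down the intended behaviour of the helpers above.
def _smoke_test() -> None:
    assert is_prime(97) and not is_prime(96)
    assert gcd(24, 36) == 12
    assert kg_v(8, 10) == 40  # lcm of 8 and 10
    assert goldbach(28) == [5, 23]  # first prime pair found that sums to 28
    assert is_perfect_number(28)  # 1 + 2 + 4 + 7 + 14 == 28
    assert fib(10) == 89  # this fib is offset by one from the usual indexing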
| 262
| 1
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
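

# Added usage sketch (not part of the original tests): the same tiny checkpoint can be
# driven outside unittest; `top_k` and the checkpoint name mirror the test above.
def _pipeline_usage_sketch(video_path):
    classifier = pipeline(
        "video-classification",
        model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
    )
    return classifier(video_path, top_k=2)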
| 26
|
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
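
# Worked example (added for clarity): for n = 10 the sum of squares is 385 and the square of
# the sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 2640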
| 26
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
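

# Added note (not in the original script): `dynamic_axes` above marks the batch and sequence
# dimensions as symbolic, so the exported graph accepts any batch size / sequence length.
# Sketch of feeding a new batch to the exported session; the decoder_start_token_id value
# is a hypothetical placeholder -- use model.config.decoder_start_token_id in practice.
def _run_exported(ort_sess, tokenizer, sentences, num_beams=4, max_length=5):
    enc = tokenizer(sentences, padding=True, return_tensors="pt")
    return ort_sess.run(
        None,
        {
            "input_ids": enc["input_ids"].cpu().numpy(),
            "attention_mask": enc["attention_mask"].cpu().numpy(),
            "num_beams": np.array(num_beams),
            "max_length": np.array(max_length),
            "decoder_start_token_id": np.array(2),  # hypothetical
        },
    )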
| 92
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
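

# Added demonstration (not part of the release script): how an entry of REPLACE_PATTERNS
# rewrites a matching line. The sample version string is hypothetical.
def _pattern_demo():
    sample = '__version__ = "4.26.0.dev0"\n'
    re_pattern, replace = REPLACE_PATTERNS["init"]
    return re_pattern.sub(replace.replace("VERSION", "4.26.0"), sample)
    # -> '__version__ = "4.26.0"\n'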
| 92
| 1
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
    def __init__( self , in_features , out_features ) -> Optional[Any]:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def UpperCamelCase_ ( self , x , conditioning_emb ) -> Dict:
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
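# FiLM in one line: the conditioning embedding is projected to twice the
# feature width, chunked into (scale, shift), and applied as
# x * (1 + scale) + shift, which reduces to the identity when the
# projection outputs zeros.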
| 325
|
from maths.prime_check import is_prime
def lowerCamelCase__ (number ):
    if not isinstance(number, int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
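# Quick usage sketch for the twin-prime helper above (values chosen for
# illustration): 5 -> 7 since 5 and 7 are twin primes, while 4 -> -1
# because 4 is not prime.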
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config (model_name ) -> int:
"""simple docstring"""
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def _SCREAMING_SNAKE_CASE (name ) -> Dict:
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase__ = '''encoder.''' + name
if "encoder.layers" in name:
lowercase__ = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase__ = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase__ = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase__ = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ = '''layernorm.bias'''
if "head" in name:
lowercase__ = name.replace('''head''' , '''classifier''' )
else:
lowercase__ = '''focalnet.''' + name
return name
def convert_focalnet_checkpoint (model_name , pytorch_dump_folder_path , push_to_hub=False ) -> Any:
"""simple docstring"""
lowercase__ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[_SCREAMING_SNAKE_CASE(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
# verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowercase__ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowercase__ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowercase__ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowercase__ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
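    # Example invocation of this conversion script (illustrative script name
    # and paths; the flags match the argparse definition above):
    #   python convert_focalnet_checkpoint.py --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub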
| 2
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
    # (Sequential: building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64, 64 is the height & width of the dataset images and 3 is the number of RGB channels
    # (3, 3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    # Model.fit accepts Python generators directly; fit_generator is
    # deprecated and removed in recent TF2 releases.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('cnn.h5')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid head outputs a probability in (0, 1), so threshold at 0.5
    # rather than comparing against exact 0/1 values.
    if result[0][0] >= 0.5:
        prediction = 'Abnormality detected'
    else:
        prediction = 'Normal'
| 2
| 1
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
lowercase__ :List[str] = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
lowercase__ :Optional[Any] = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
lowercase__ :Optional[Any] = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
lowercase__ :str = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
lowercase__ :int = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
lowercase__ :Optional[int] = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def UpperCamelCase ( ):
'''simple docstring'''
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def UpperCamelCase ( lowerCAmelCase__ = 100 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(lowerCAmelCase__ ))
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = PokerHand(lowerCAmelCase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ ).compare_with(PokerHand(lowerCAmelCase__ ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
assert PokerHand(lowerCAmelCase__ ).compare_with(PokerHand(lowerCAmelCase__ ) ) == expected
def UpperCamelCase ( ):
'''simple docstring'''
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
assert hand == poker_hands[index]
def UpperCamelCase ( ):
'''simple docstring'''
# Test that five high straights are compared correctly.
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def UpperCamelCase ( ):
'''simple docstring'''
    # Repeated calls to _is_five_high_straight should keep returning True and
    # must not mutate the internal card-values list after the first call.
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def UpperCamelCase ( ):
'''simple docstring'''
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    file_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(file_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 376
| 97
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
    vocab = collections.OrderedDict()
    with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('''\n''' )
        vocab[token] = index
return vocab
class lowercase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self ,A__ ,A__="[SEP]" ,A__="[SEP]" ,A__="[SEP]" ,A__="[UNK]" ,A__="[PAD]" ,A__="[CLS]" ,A__="[MASK]" ,A__ = None ,**A__ ,):
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A__ ,eos_token=A__ ,sep_token=A__ ,unk_token=A__ ,pad_token=A__ ,cls_token=A__ ,mask_token=A__ ,sp_model_kwargs=self.sp_model_kwargs ,**A__ ,)
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''')
raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
        for i in range(1_0):
            token = f'[unused{i}]'
            self.fairseq_tokens_to_ids[token] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 1_2
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
def __getstate__( self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self ,d):
        self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''')
raise
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size( self):
        return len(self.sp_model) + self.fairseq_offset
    def get_vocab( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize( self ,text):
        return self.sp_model.encode(text ,out_type=str)
    def _convert_token_to_id( self ,token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self ,index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string( self ,tokens):
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE ,''' ''').strip()
return out_string
    def save_vocabulary( self ,save_directory ,filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file ,'''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1 = None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
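    # Offset arithmetic sketch for the vocab alignment above: with
    # fairseq_offset = 12, the spm piece id of "," (3) maps to 3 + 12 = 15 in
    # the embedding vocab, while ids 0-4 hold the special tokens and ids 5-14
    # the ten [unused] slots registered in fairseq_tokens_to_ids.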
| 97
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : int = logging.get_logger(__name__)
def get_dpt_config ( checkpoint_url : Optional[int] ) -> Any:
    config = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
__A : Dict = 10_24
__A : Tuple = 40_96
__A : Dict = 24
__A : Dict = 16
__A : List[str] = [5, 11, 17, 23]
__A : List[Any] = [2_56, 5_12, 10_24, 10_24]
        expected_shape = (1, 3_84, 3_84)
if "nyu" or "midas" in checkpoint_url:
__A : Optional[int] = 7_68
__A : str = [1, 1, 1, 0.5]
__A : int = [2_56, 5_12, 7_68, 7_68]
__A : List[Any] = 1_50
__A : Dict = 16
        expected_shape = (1, 3_84, 3_84)
__A : Optional[Any] = False
__A : Optional[Any] = 'project'
if "ade" in checkpoint_url:
__A : List[Any] = True
__A : int = 7_68
__A : Any = [1, 1, 1, 0.5]
__A : List[Any] = 1_50
__A : Union[str, Any] = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def remove_ignore_keys_ ( state_dict : Optional[int] ) -> Dict:
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def _lowerCAmelCase ( name : Dict ) -> str:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__A : Optional[int] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
__A : List[Any] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
__A : Dict = name.replace('patch_embed' , '' )
if "pos_embed" in name:
__A : int = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
__A : int = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
__A : Any = name.replace('proj' , 'projection' )
if "blocks" in name:
__A : Tuple = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
__A : List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__A : Dict = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
__A : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
__A : str = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
__A : Any = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
__A : List[str] = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
__A : Any = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
__A : Any = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
__A : Dict = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
__A : Dict = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
__A : List[Any] = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__A : int = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__A : str = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
__A : str = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
__A : int = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
__A : Optional[int] = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
__A : Dict = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__A : Dict = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
__A : int = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
__A : Optional[Any] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
__A : List[Any] = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__A : List[Any] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
__A : Dict = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
__A : List[Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
__A : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
__A : Dict = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
__A : Dict = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
__A : Union[str, Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
__A : int = name.replace('pretrained' , 'dpt' )
if "bn" in name:
__A : Dict = name.replace('bn' , 'batch_norm' )
if "head" in name:
__A : str = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
__A : Tuple = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
__A : Any = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
__A : Any = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
__A : List[str] = name.replace('..' , '.' )
if "stem.conv" in name:
__A : Union[str, Any] = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
__A : int = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
__A : Union[str, Any] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
__A : Optional[Any] = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
__A : Union[str, Any] = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
__A : Union[str, Any] = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
__A : Dict = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v ( state_dict : Optional[int] , config : List[str] ) -> Any:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__A : Optional[int] = in_proj_weight[: config.hidden_size, :]
__A : Union[str, Any] = in_proj_bias[: config.hidden_size]
__A : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__A : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__A : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__A : Union[str, Any] = in_proj_bias[-config.hidden_size :]
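# Slicing sketch for the fused qkv split above: for hidden size H the fused
# (3H, H) projection weight is cut row-wise into query = rows [0, H),
# key = rows [H, 2H) and value = rows [2H, 3H); the fused bias splits the
# same way, which is exactly what the slices above implement.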
def prepare_img ( ) -> int:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint ( checkpoint_url : Tuple , pytorch_dump_folder_path : Optional[int] , push_to_hub : Optional[Any] , model_name : Tuple , show_prediction : Optional[Any] ) -> Dict:
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[_lowerCAmelCase(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 4_80 if 'ade' in checkpoint_url else 3_84
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    inputs = image_processor(image , return_tensors='pt' )
    # forward pass
    outputs = model(**inputs ).logits if 'ade' in checkpoint_url else model(**inputs ).predicted_depth
if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'Saving model to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
lowercase__ : Union[str, Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 190
|
'''simple docstring'''
import math
def is_prime ( number : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
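# Worked example of the 6k +/- 1 trial division above: for number = 91,
# int(sqrt(91) + 1) = 10, so the loop only tests i = 5 (and i + 2 = 7);
# 91 % 7 == 0, hence 91 = 7 * 13 is correctly reported as composite.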
def _lowerCAmelCase ( ratio : float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190
| 1
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__UpperCAmelCase = logging.getLogger(__name__)
class lowerCamelCase :
'''simple docstring'''
    def __init__( self ) -> str:
        self.initialized = False
    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index ) -> List[str]:
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True
    def init_retrieval( self ) -> int:
        self.retriever.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ) -> Dict:
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class lowerCamelCase (__snake_case ):
'''simple docstring'''
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None ) -> Optional[Any]:
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ) -> Union[str, Any]:
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ) -> Optional[int]:
return super(UpperCamelCase__ , cls ).get_tokenizers(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Optional[Any] = kwargs.pop('config' , UpperCamelCase__ ) or RagConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase_ : Dict = RagTokenizer.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
UpperCAmelCase_ : Optional[Any] = rag_tokenizer.question_encoder
UpperCAmelCase_ : List[Any] = rag_tokenizer.generator
if indexed_dataset is not None:
UpperCAmelCase_ : List[Any] = "custom"
UpperCAmelCase_ : Any = CustomHFIndex(config.retrieval_vector_size , UpperCamelCase__ )
else:
UpperCAmelCase_ : Tuple = cls._build_index(UpperCamelCase__ )
return cls(
UpperCamelCase__ , question_encoder_tokenizer=UpperCamelCase__ , generator_tokenizer=UpperCamelCase__ , retrieval_workers=UpperCamelCase__ , index=UpperCamelCase__ , )
| 370
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__UpperCAmelCase = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__UpperCAmelCase = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
__UpperCAmelCase = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer ( text : Optional[int] ):
    '''simple docstring'''
    def remove_articles(text : Tuple ):
        regex = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
        return re.sub(regex , ' ' , text )
    def white_space_fix(text : int ):
        return " ".join(text.split() )
    def remove_punc(text : int ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text : List[str] ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact ( a_gold : List[str] , a_pred : List[Any] ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em ( predictions : Optional[Any] , references : Tuple ):
    '''simple docstring'''
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram ( sgrams : Optional[Any] , cgrams : Any , rgramslist : Optional[int] , numref : Optional[Any] ):
    '''simple docstring'''
    rgramlist = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramlist )
    sgramcounter = Counter(sgrams )
UpperCAmelCase_ : int = Counter()
for sgram, scount in sgramcounter.items():
UpperCAmelCase_ : Any = scount * numref
UpperCAmelCase_ : List[Any] = Counter(__snake_case )
UpperCAmelCase_ : Dict = Counter()
for cgram, ccount in cgramcounter.items():
UpperCAmelCase_ : int = ccount * numref
# KEEP
UpperCAmelCase_ : Optional[Any] = sgramcounter_rep & cgramcounter_rep
UpperCAmelCase_ : Any = keepgramcounter_rep & rgramcounter
UpperCAmelCase_ : Union[str, Any] = sgramcounter_rep & rgramcounter
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : Optional[Any] = 1
if len(__snake_case ) > 0:
UpperCAmelCase_ : List[str] = keeptmpscorea / len(__snake_case )
if len(__snake_case ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
UpperCAmelCase_ : List[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
UpperCAmelCase_ : List[Any] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
UpperCAmelCase_ : List[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
UpperCAmelCase_ : Optional[int] = sgramcounter_rep - cgramcounter_rep
UpperCAmelCase_ : Dict = delgramcounter_rep - rgramcounter
UpperCAmelCase_ : Optional[Any] = sgramcounter_rep - rgramcounter
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : str = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_ : List[Any] = 1
if len(__snake_case ) > 0:
UpperCAmelCase_ : Dict = deltmpscorea / len(__snake_case )
# ADDITION
UpperCAmelCase_ : Tuple = set(__snake_case ) - set(__snake_case )
UpperCAmelCase_ : Union[str, Any] = set(__snake_case ) & set(__snake_case )
UpperCAmelCase_ : Dict = set(__snake_case ) - set(__snake_case )
UpperCAmelCase_ : List[str] = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Any = 1
if len(__snake_case ) > 0:
UpperCAmelCase_ : Dict = addtmpscore / len(__snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase_ : Optional[int] = addtmpscore / len(__snake_case )
UpperCAmelCase_ : Optional[Any] = 0
if addscore_precision > 0 or addscore_recall > 0:
UpperCAmelCase_ : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent ( ssent : str , csent : Any , rsents : Union[str, Any] ):
    '''simple docstring'''
    numref = len(rsents )
    sagrams = ssent.split(' ' )
    cagrams = csent.split(' ' )
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Tuple = []
for rsent in rsents:
UpperCAmelCase_ : List[Any] = rsent.split(' ' )
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : str = []
ragramslist.append(__snake_case )
for i in range(0 , len(__snake_case ) - 1 ):
if i < len(__snake_case ) - 1:
UpperCAmelCase_ : Tuple = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(__snake_case )
if i < len(__snake_case ) - 2:
UpperCAmelCase_ : List[str] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(__snake_case )
if i < len(__snake_case ) - 3:
UpperCAmelCase_ : Union[str, Any] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(__snake_case )
ragramslist.append(__snake_case )
ragramslist.append(__snake_case )
ragramslist.append(__snake_case )
for i in range(0 , len(__snake_case ) - 1 ):
if i < len(__snake_case ) - 1:
UpperCAmelCase_ : str = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(__snake_case )
if i < len(__snake_case ) - 2:
UpperCAmelCase_ : List[str] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(__snake_case )
if i < len(__snake_case ) - 3:
UpperCAmelCase_ : Any = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(__snake_case )
for i in range(0 , len(__snake_case ) - 1 ):
if i < len(__snake_case ) - 1:
UpperCAmelCase_ : Optional[int] = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(__snake_case )
if i < len(__snake_case ) - 2:
UpperCAmelCase_ : Tuple = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(__snake_case )
if i < len(__snake_case ) - 3:
UpperCAmelCase_ : Union[str, Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(__snake_case )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : int = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : str = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : int = SARIngram(__snake_case , __snake_case , __snake_case , __snake_case )
UpperCAmelCase_ : List[str] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
UpperCAmelCase_ : Optional[Any] = sum([delascore, delascore, delascore, delascore] ) / 4
UpperCAmelCase_ : List[str] = sum([addascore, addascore, addascore, addascore] ) / 4
UpperCAmelCase_ : Dict = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
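# Note on the averaging above: SARIngram is applied to unigrams through
# 4-grams, and the final SARI score is the mean over those n-gram orders of
# the keep F1, deletion precision and addition F1, averaged again across the
# three operations (Xu et al., 2016).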
def normalize ( sentence : List[Any] , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ):
    '''simple docstring'''
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari ( sources : Optional[int] , predictions : List[str] , references : Dict ):
    '''simple docstring'''
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('Sources length must match predictions and references lengths.' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def compute_sacrebleu ( predictions : int , references : Union[str, Any] , smooth_method : str="exp" , smooth_value : Any=None , force : Union[str, Any]=False , lowercase : Union[str, Any]=False , use_effective_order : List[str]=False , ):
    '''simple docstring'''
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('Sacrebleu requires the same number of references for each prediction' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , sources , predictions , references ) -> str:
        result = {}
        result.update({'sari': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'exact': compute_em(predictions=predictions , references=references )} )
        return result
| 145
| 0
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
EQUATORIAL_RADIUS = 6378137
def lowerCAmelCase_ ( lat1,lon1,lat2,lon2 ):
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1,lon1,lat2,lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
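# Usage sketch (hypothetical coordinates; the four arguments are
# lat1, lon1, lat2, lon2 in degrees): one degree of longitude along the
# equator is 2 * pi * EQUATORIAL_RADIUS / 360, roughly 111.3 km, so the
# value returned for (0, 0) and (0, 1) should be close to 111_319 metres.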
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase ( UpperCamelCase__ ):
_a = (DPMSolverSDEScheduler,)
_a = 1_0
def a__ ( self , **_a ) -> Optional[Any]:
        config = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**_a )
return config
def a__ ( self ) -> Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def a__ ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def a__ ( self ) -> Any:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def a__ ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def a__ ( self ) -> Optional[int]:
_A : Any = self.scheduler_classes[0]
_A : List[str] = self.get_scheduler_config()
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Dict = self.dummy_model()
_A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Dict = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : str = model(_a , _a )
_A : List[Any] = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Dict = torch.sum(torch.abs(_a ) )
_A : Dict = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Optional[Any]:
_A : Dict = self.scheduler_classes[0]
_A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Tuple = self.dummy_model()
_A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Tuple = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : int = scheduler.scale_model_input(_a , _a )
_A : Tuple = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Optional[Any] = torch.sum(torch.abs(_a ) )
_A : List[Any] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = self.scheduler_classes[0]
_A : List[Any] = self.get_scheduler_config()
_A : List[str] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Union[str, Any] = self.dummy_model()
_A : Optional[Any] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A : int = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Dict = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : str = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.scheduler_classes[0]
_A : Optional[Any] = self.get_scheduler_config()
_A : int = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Optional[Any] = self.dummy_model()
_A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
_A : str = sample.to(_a )
for t in scheduler.timesteps:
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : List[str] = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : List[str] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
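
# Added sketch (commented; uses only the scheduler API exercised above) of the
# denoising loop the full-loop tests run, with a zero tensor standing in for a
# real UNet prediction:
# scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
# scheduler.set_timesteps(10)
# sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     scaled = scheduler.scale_model_input(sample, t)
#     sample = scheduler.step(torch.zeros_like(scaled), t, sample).prev_sample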
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
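
# Added illustration (a simplified sketch, not the real _LazyModule): the pattern
# above swaps this module object for a proxy that only imports a submodule when
# one of its names is first accessed, keeping `import transformers` cheap.
# import importlib
# class _LazyDemo:
#     def __getattr__(self, name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module(f".{submodule}", __name__), name)
#         raise AttributeError(name)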
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str) -> None:
    """Update the stable version and the version dropdown in the docs' custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
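
# Example invocation (added; the script path and version number are illustrative):
#   python utils/update_custom_js.py --version 4.31.0
# which rewrites `const stableVersion` and appends a "v4.31.0" entry to the
# `const versionMapping = { ... }` block in docs/source/_static/js/custom.js.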
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ) -> None:
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # Gated-GELU feed-forward: the activated projection gates the linear one.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """T5-style layer norm: no mean subtraction and no bias."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Always compute the variance in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM: feature-wise linear modulation of `x` conditioned on an embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
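
# Added sketch of what TaFiLMLayer does, with toy shapes (values illustrative):
# film = TaFiLMLayer(in_features=8, out_features=4)
# x = torch.randn(2, 16, 4)    # (batch, seq, channels)
# cond = torch.randn(2, 1, 8)  # conditioning embedding, broadcast over seq
# y = film(x, cond)            # per-channel scale (1 + s) and shift b from `cond`
# assert y.shape == x.shape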
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModel(config=_UpperCAmelCase )
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase = [input_ids, input_mask]
__lowercase = model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model(self):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
    def test_for_masked_lm(self):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
    def test_for_multiple_choice(self):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
    def test_for_question_answering(self):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
    def test_for_sequence_classification(self):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
    def test_for_token_classification(self):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
    def test_saved_model_creation_extended(self):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
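
# Added interactive equivalent of the integration check above (checkpoint name
# is real; requires TF and network access; output values are pinned by the test):
# model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
# output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
# print(output.shape)  # (1, 6, 768)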
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
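
# Added sanity sketch: rescaling the surviving rows by 1 / keep_prob keeps the
# expected activation magnitude (illustrative check, not part of the original file):
# x = torch.ones(10000, 4)
# kept = drop_path(x, drop_prob=0.2, training=True)
# print(kept.mean().item())  # close to 1.0 although ~20% of rows are zeroed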
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings via a strided convolution, with optional normalization."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group, i.e. layer norm over the channel dimension."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the block models only the pooled "token mixing" residual.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
POOLFORMER_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
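
# Added usage sketch (the checkpoint name is real; the random tensor is a
# stand-in for a properly preprocessed image):
# model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
# logits = model(torch.randn(1, 3, 224, 224)).logits
# print(model.config.id2label[int(logits.argmax(-1))])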
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
@require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__snake_case = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=False
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
UpperCamelCase__ , UpperCamelCase__ :str = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCamelCase_ ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase__ , UpperCamelCase__ :str = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(UpperCamelCase_ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
UpperCamelCase__ :Optional[int] = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
UpperCamelCase__ :str = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
UpperCamelCase__ :Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :List[str] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
UpperCamelCase__ :Any = np.zeros(5 , dtype=np.floataa )
UpperCamelCase__ :Optional[int] = 1
UpperCamelCase__ , UpperCamelCase__ :str = index.search(UpperCamelCase_ )
self.assertRaises(UpperCamelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
UpperCamelCase__ :List[str] = np.eye(5 , dtype=np.floataa )[::-1]
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = index.search_batch(UpperCamelCase_ )
self.assertRaises(UpperCamelCase_ , index.search_batch , queries[0] )
UpperCamelCase__ :List[Any] = [scores[0] for scores in total_scores]
UpperCamelCase__ :Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCamelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :Tuple = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
UpperCamelCase__ :Dict = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCamelCase_ ):
UpperCamelCase__ :Optional[int] = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :List[Any] = faiss.IndexFlat(5 )
UpperCamelCase__ :List[Any] = FaissIndex(custom_index=UpperCamelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCamelCase_ ) as tmp_file:
index.save(tmp_file.name )
UpperCamelCase__ :Optional[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase__ :Dict = np.zeros(5 , dtype=np.floataa )
UpperCamelCase__ :str = 1
UpperCamelCase__ , UpperCamelCase__ :List[str] = index.search(UpperCamelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a ( __a ) -> Tuple:
'''simple docstring'''
import faiss
UpperCamelCase__ :Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCamelCase__ :Optional[int] = '''index.faiss'''
UpperCamelCase__ :Tuple = f'''mock://{index_name}'''
index.save(__a , storage_options=mockfs.storage_options )
UpperCamelCase__ :List[Any] = FaissIndex.load(__a , storage_options=mockfs.storage_options )
UpperCamelCase__ :Tuple = np.zeros(5 , dtype=np.floataa )
UpperCamelCase__ :int = 1
UpperCamelCase__ , UpperCamelCase__ :str = index.search(__a )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
UpperCamelCase__ :Any = Elasticsearch()
UpperCamelCase__ :Any = {'''acknowledged''': True}
UpperCamelCase__ :int = ElasticSearchIndex(es_client=UpperCamelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
UpperCamelCase__ :Any = '''foo'''
UpperCamelCase__ :Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
UpperCamelCase__ , UpperCamelCase__ :int = index.search(UpperCamelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
UpperCamelCase__ :Dict = '''foo'''
UpperCamelCase__ :Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
UpperCamelCase__ , UpperCamelCase__ :Any = index.search(UpperCamelCase_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
UpperCamelCase__ :Tuple = ['''foo''', '''bar''', '''foobar''']
UpperCamelCase__ :Optional[int] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
UpperCamelCase__ , UpperCamelCase__ :List[Any] = index.search_batch(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = [scores[0] for scores in total_scores]
UpperCamelCase__ :int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCamelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCamelCase_ )
# batched queries with timeout
UpperCamelCase__ :Any = ['''foo''', '''bar''', '''foobar''']
UpperCamelCase__ :int = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = index.search_batch(UpperCamelCase_ , request_timeout=30 )
UpperCamelCase__ :Any = [scores[0] for scores in total_scores]
UpperCamelCase__ :str = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCamelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCamelCase_ )
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
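

# Illustrative usage sketch (not part of the original module; assumes the NLLB
# checkpoint can be downloaded; PipelineTool instances are callable like functions):
#
#     translator = TranslationTool()
#     print(translator("HuggingFace est une entreprise.", src_lang="French", tgt_lang="English"))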
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
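

# Illustrative usage sketch (not part of the original module): construct the
# config with a non-default value and read a couple of fields back.
if __name__ == "__main__":
    config = BertAbsConfig(dec_layers=8)
    print(config.model_type)  # "bertabs"
    print(config.dec_layers)  # 8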
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
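
# Illustrative note (not part of the original module): the _LazyModule above
# defers the heavy framework imports until first attribute access, e.g.
#
#     from transformers.models.convbert import ConvBertConfig  # cheap, config only
#     from transformers.models.convbert import ConvBertModel   # triggers the torch-backed import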
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]


@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # NOTE: the exact env keys read by FullyShardedDataParallelPlugin vary across
            # accelerate versions; both the numeric index and the name are set here.
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))


@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
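

# Illustrative (not part of the original test file): one of the commands the
# performance test above assembles looks like
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 --use_fsdp \
#       --fsdp_sharding_strategy=2 --mixed_precision=fp16 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       <scripts>/external_deps/test_performance.py --output_dir=<tmpdir> \
#       --performance_lower_bound=0.82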
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
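

# Illustrative usage sketch (not part of the original module): build a config
# and inspect the dynamic ONNX input axes it advertises.
if __name__ == "__main__":
    config = RobertaPreLayerNormConfig()
    onnx_config = RobertaPreLayerNormOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask axes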
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = tempfile.mkdtemp()
# fmt: off
__A : Optional[int] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Tuple = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : List[str] = {'unk_token': '<unk>'}
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(_UpperCAmelCase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(_UpperCAmelCase))
__A : str = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
__A : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCAmelCase)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1)) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default ( self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor)
    def test_save_load_pretrained_additional_features ( self):
        '''simple docstring'''
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor)
    def test_image_processor ( self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np')
        input_processor = processor(images=image_input , return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
    def test_tokenizer ( self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def test_processor ( self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode ( self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
    def test_model_input_names ( self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 190
| 1
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
    def __init__( self : str , parent : Any , batch_size : List[Any]=13 , seq_length : Optional[int]=7 , is_training : List[Any]=True , use_input_mask : Optional[Any]=True , use_token_type_ids : str=True , use_labels : List[Any]=True , vocab_size : List[str]=99 , hidden_size : List[Any]=24 , num_hidden_layers : List[str]=2 , num_attention_heads : Dict=6 , intermediate_size : Optional[Any]=37 , hidden_act : Dict="gelu" , hidden_dropout_prob : Union[str, Any]=0.1 , attention_probs_dropout_prob : List[Any]=0.1 , max_position_embeddings : Tuple=512 , type_vocab_size : int=16 , type_sequence_label_size : Any=2 , initializer_range : Union[str, Any]=0.02 , num_labels : Tuple=3 , scope : Union[str, Any]=None , range_bbox : str=1_000 , ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs ( self : List[Any] ) -> Dict:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config ( self : str ) -> List[Any]:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model ( self : Optional[int] , config : Union[str, Any] , input_ids : Any , bbox : int , token_type_ids : Optional[Any] , input_mask : Dict , sequence_labels : List[str] , token_labels : str , ) -> Optional[int]:
        '''simple docstring'''
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification ( self : Optional[int] , config : int , input_ids : Dict , bbox : Optional[int] , token_type_ids : Union[str, Any] , input_mask : Optional[int] , sequence_labels : Dict , token_labels : List[Any] , ) -> List[Any]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering ( self : int , config : Optional[Any] , input_ids : List[str] , bbox : List[str] , token_type_ids : List[Any] , input_mask : List[str] , sequence_labels : str , token_labels : str , ) -> Optional[Any]:
        '''simple docstring'''
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self : Any ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest ( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip ( self : Optional[int] , pipeline_test_casse_name : int , config_class : List[Any] , model_architecture : Any , tokenizer_name : List[str] , processor_name : List[Any] ) -> Optional[int]:
        '''simple docstring'''
        return True
    def setUp ( self : List[str] ) -> Tuple:
        '''simple docstring'''
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config ( self : Tuple ) -> List[str]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model ( self : List[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings ( self : Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification ( self : List[str] ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering ( self : int ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self : Any ) -> Optional[int]:
        '''simple docstring'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest ( unittest.TestCase ):
    def test_inference_no_head ( self : Tuple ) -> Any:
        '''simple docstring'''
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 230
|
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast ( tokenizer_name , checkpoint_name , dump_path , force_download ) -> Any:
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f"""=> File names {file_names}""" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 230
| 1
|
from math import isclose, sqrt
def next_point ( point_x: float , point_y: float , incoming_gradient: float ):
    # gradient of the normal at the point of reflection on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    # sa and ca are sin(2*theta) and cos(2*theta) for the normal's angle theta
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution ( first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ):
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
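# Sanity check (not in the original file): the published answer to Project
# Euler #144 is 354, so solution() with the default beam should return 354.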
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62
|
"""simple docstring"""
def solution ( length: int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
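# Each entry different_colour_ways_number[n][t - 2] counts the tilings of a
# length-n row containing at least one coloured tile of length t (t = 2, 3, 4);
# this is Project Euler #116, and the result sums the three colours separately.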
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16
| 0
|
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs ( self ) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config ( self ) -> Union[str, Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model ( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> List[Any]:
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model ( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> Optional[int]:
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification ( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> Optional[int]:
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :int = True
UpperCAmelCase_ :int = False
UpperCAmelCase_ :List[Any] = False
    def is_pipeline_test_to_skip ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Optional[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp ( self ) -> Optional[Any]:
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown ( self ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config ( self ) -> Dict:
        self.config_tester.run_common_tests()
    def test_ctrl_model ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@slow
def __lowerCAmelCase ( self ) -> Any:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def __lowerCAmelCase ( self ) -> Any:
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown ( self ) -> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl ( self ) -> Optional[int]:
        model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=torch_device ) # Legal the president is
        expected_output_ids = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 353
|
"""simple docstring"""
def knapsack ( weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
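# Example (hypothetical input): knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
# returns 13, taking the items of weight 1 and 4 (values 5 + 8 = 13).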
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
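# Rough interaction of the two knobs (a sketch; the exact bookkeeping lives
# inside `LocalSGD`): with gradient_accumulation_steps=K an optimizer step
# happens every K batches, while LocalSGD averages parameters across workers
# every `local_sgd_steps` calls to local_sgd.step(), i.e. every
# `local_sgd_steps` batches in this script.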
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        # e.g. under fp16 a batch whose longest sequence has 61 tokens is padded
        # up to 64, keeping tensor dims at multiples of 8 for Tensor Core use
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function ( config , args ) -> Any:
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
def main ( ) -> Any:
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
    parser.add_argument(
        """--local_sgd_steps""" , type=int , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
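# Typical launch (the file name local_sgd.py is an assumption for this sketch):
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1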
| 317
|
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""
def quote_of_the_day ( ) -> list:
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes ( ) -> list:
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
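# Both endpoints return a JSON list holding a single quote object; the exact
# fields are whatever the zenquotes.io API currently provides.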
| 317
| 1
|
'''simple docstring'''
def bfs ( graph , s , t , parent ):
    """Breadth-first search for an augmenting path from s to t."""
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson ( graph , source , sink ):
    """Maximum flow from source to sink via BFS augmenting paths (Edmonds-Karp)."""
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float('Inf' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
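# The adjacency matrix above is the classic CLRS max-flow example, so the
# script is expected to print 23.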
| 358
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph :
    def __init__( self , num_of_nodes ):
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge ( self , u_node , v_node , weight ):
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight] )
    def find_component ( self , u_node ):
        """simple docstring"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component ( self , u_node ):
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union ( self , component_size , u_node , v_node ):
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka ( self ):
        """simple docstring"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def test_vector ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
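# Minimal usage sketch (hypothetical graph):
#   g = Graph(3)
#   g.add_edge(0, 1, 1); g.add_edge(1, 2, 2); g.add_edge(0, 2, 3)
#   g.boruvka()  # builds an MST of total weight 3 from edges (0-1) and (1-2)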
| 264
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
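        # e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens, and int(0.9 * 25) = 22 masked positions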
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
a__ : Union[str, Any] = None
if self.use_labels:
a__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self) -> Dict:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__ : Any = VideoMAEModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : List[Any] = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : Dict = VideoMAEForPreTraining(lowercase)
model.to(lowercase)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a__ : Optional[int] = torch.ones((self.num_masks,))
a__ : Tuple = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
a__ : Tuple = mask.expand(self.batch_size , -1).bool()
a__ : Any = model(lowercase , lowercase)
# model only returns predictions for masked patches
a__ : Optional[int] = mask.sum().item()
a__ : Dict = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels))
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Any = self.prepare_config_and_inputs()
a__ , a__ , a__ : Dict = config_and_inputs
a__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
    pipeline_model_mapping = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__A : int = False
__A : Tuple = False
__A : int = False
__A : List[str] = False
    def setUp ( self) -> List[Any]:
        '''simple docstring'''
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37)
def __lowercase ( self , lowercase , lowercase , lowercase=False) -> List[str]:
'''simple docstring'''
a__ : List[Any] = copy.deepcopy(lowercase)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a__ : List[str] = torch.ones((self.model_tester.num_masks,))
a__ : Dict = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
a__ : List[Any] = mask.expand(self.model_tester.batch_size , -1).bool()
a__ : Any = bool_masked_pos.to(lowercase)
if return_labels:
if model_class in [
*get_values(lowercase),
]:
a__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase)
return inputs_dict
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds')
def __lowercase ( self) -> List[str]:
'''simple docstring'''
pass
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[Any] = model_class(lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear))
def __lowercase ( self) -> str:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(lowercase)
a__ : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Any = [*signature.parameters.keys()]
a__ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase)
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Dict = VideoMAEModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Optional[Any] = True
for model_class in self.all_model_classes:
a__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
a__ : str = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
a__ : Optional[Any] = True
a__ : List[str] = False
a__ : Dict = True
a__ : Any = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : str = model(**self._prepare_for_class(lowercase , lowercase))
a__ : List[str] = outputs.attentions
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__ : List[Any] = True
a__ : int = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : Any = model(**self._prepare_for_class(lowercase , lowercase))
a__ : Tuple = outputs.attentions
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
a__ : str = len(lowercase)
# Check attention is always last and order is fine
a__ : Tuple = True
a__ : int = True
a__ : Optional[int] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : Dict = model(**self._prepare_for_class(lowercase , lowercase))
self.assertEqual(out_len + 1 , len(lowercase))
a__ : str = outputs.attentions
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __lowercase ( self) -> Dict:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase):
a__ : Optional[Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : int = model(**self._prepare_for_class(lowercase , lowercase))
a__ : Optional[int] = outputs.hidden_states
a__ : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase) , lowercase)
a__ : str = self.model_tester.seq_length - self.model_tester.num_masks
a__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = True
check_hidden_states_output(lowercase , lowercase , lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __lowercase ( self) -> Any:
'''simple docstring'''
pass
def A_ ( ) -> int:
a__ : Union[str, Any] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
a__ : Optional[int] = np.load(A__ )
return list(A__ )
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Any = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
lowercase)
a__ : int = self.default_image_processor
a__ : Optional[int] = prepare_video()
a__ : str = image_processor(lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__ : Dict = model(**lowercase)
# verify the logits
a__ : Any = torch.Size((1, 400))
self.assertEqual(outputs.logits.shape , lowercase)
a__ : Any = torch.tensor([0.36_69, -0.06_88, -0.24_21]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(lowercase)
a__ : Tuple = self.default_image_processor
a__ : List[str] = prepare_video()
a__ : List[str] = image_processor(lowercase , return_tensors='pt').to(lowercase)
# add boolean mask, indicating which patches to mask
a__ : List[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt')
a__ : Optional[Any] = torch.load(lowercase)
# forward pass
with torch.no_grad():
a__ : Optional[int] = model(**lowercase)
# verify the logits
a__ : int = torch.Size([1, 1408, 1536])
a__ : List[str] = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=lowercase)
self.assertEqual(outputs.logits.shape , lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `True`)
a__ : int = torch.tensor([0.51_42] , device=lowercase)
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `False`)
a__ : Any = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=lowercase).to(
lowercase)
with torch.no_grad():
a__ : Optional[int] = model(**lowercase)
a__ : Tuple = torch.tensor(torch.tensor([0.64_69]) , device=lowercase)
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4))
| 99
|
def binary_and ( a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
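# Examples: binary_and(25, 32) == '0b000000' and binary_and(37, 50) == '0b100000'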
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99
| 1
|
def encrypt ( input_string: str , key: int ) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = [''.join(row ) for row in temp_grid]
    output_string = ''.join(grid )
    return output_string
def decrypt ( input_string: str , key: int ) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('*' )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ''  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce ( input_string: str ) -> dict[int, str]:
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
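# Illustrative round trip (not part of the original file): encrypting and then
# brute-forcing recovers the plaintext at the original key.
if __name__ == "__main__":
    message = "defend the east wall of the castle"
    cracked = bruteforce(encrypt(message, 5))
    assert cracked[5] == message
    print("key 5 ->", cracked[5])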
| 369
|
from ..utils import DummyObject, requires_backends


# NOTE: the six concrete class names were stripped by obfuscation in this dump;
# generic placeholder names are used below. Each stub follows the standard
# dummy-object pattern (a `DummyObject` metaclass plus `from_config` /
# `from_pretrained` classmethods) and raises an informative error until the
# torch, transformers and onnx backends are all installed.
class OnnxDummyPipeline1(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxDummyPipeline2(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxDummyPipeline3(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxDummyPipeline4(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxDummyPipeline5(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxDummyPipeline6(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
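# Minimal sketch (an assumption, not the library's actual implementation) of
# what a DummyObject metaclass buys: any attribute access on the placeholder
# class itself, e.g. SomePipeline.from_pretrained, fails with an ImportError
# that names the missing backends instead of a bare AttributeError.
class DummyObjectSketch(type):
    def __getattr__(cls, key):
        backends = cls.__dict__.get("_backends", [])
        raise ImportError(f"{cls.__name__} requires the following backends: {backends}")


class _DemoPipeline(metaclass=DummyObjectSketch):
    _backends = ["torch", "transformers", "onnx"]


# _DemoPipeline.anything  ->  ImportError: _DemoPipeline requires the following backends: [...]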
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
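# Minimal sketch (an assumption, not the actual transformers._LazyModule
# implementation) of the lazy-import idea used above: the module object maps
# each exported symbol to its submodule and only imports that submodule when
# the symbol is first accessed, so importing the package itself stays cheap.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name: str):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)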
| 230
|
def or_gate(input_1: int, input_2: int) -> int:
    """
    Calculate OR of the input values.

    >>> or_gate(0, 0)
    0
    >>> or_gate(0, 1)
    1
    >>> or_gate(1, 0)
    1
    >>> or_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
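# Illustrative equivalence check (not part of the original file): the tuple
# counting trick matches a plain boolean OR over the whole truth table.
if __name__ == "__main__":
    assert all(or_gate(a, b) == int(a or b) for a in (0, 1) for b in (0, 1))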
| 230
| 1
|
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Return the shortest distance and path between ``source`` and
    ``destination`` in a binary grid, where 1 marks a passable cell.

    >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), False)
    (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return float(matrix[destination]), path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
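# Illustrative run (not part of the original file): allowing diagonal steps
# shortens the route through the same 3x3 binary grid (1 = passable, 0 = blocked).
if __name__ == "__main__":
    demo_grid = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]])
    print(dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=True))
    # -> (2.0, [(0, 0), (1, 1), (2, 2)])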
| 45
|
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the candidate
        # column is not already used in the current board (possible_board),
        # because a repeat would mean a vertical collision. Then we apply the
        # two diagonal formulas:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b
        #
        # and verify that neither result already exists in its respective set
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
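# Worked check of the diagonal formulas above (not part of the original file):
# queens on the same "\" diagonal share row - col; queens on the same "/"
# diagonal share row + col.
if __name__ == "__main__":
    assert (0 - 1) == (2 - 3)  # (row=0, col=1) and (row=2, col=3): row - col = -1
    assert (0 + 3) == (2 + 1)  # (row=0, col=3) and (row=2, col=1): row + col = 3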
| 45
| 1
|