| code (string, length 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, length 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: bytes) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
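A quick round-trip sketch of the two functions above, cross-checked against the standard library (the function names follow the reconstruction above):

# Minimal round-trip sketch; output matches the stdlib's base64 module.
import base64 as stdlib_base64

payload = b"Hello, World!"
encoded = base64_encode(payload)
assert encoded == stdlib_base64.b64encode(payload)  # b'SGVsbG8sIFdvcmxkIQ=='
assert base64_decode(encoded) == payload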
[code_codestyle: 597]
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepares a list of PIL images from random channels-first numpy arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
[style_context_codestyle: 597 | label: 1]
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs)
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def lowerCamelCase__ ( self : Tuple ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def lowerCamelCase__ ( self : Dict , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> List[str]:
__magic_name__: int = [self.artists_encoder.get(__snake_case , 0 ) for artist in list_artists]
for genres in range(len(__snake_case ) ):
__magic_name__: List[Any] = [self.genres_encoder.get(__snake_case , 0 ) for genre in list_genres[genres]]
__magic_name__: Optional[Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__magic_name__: Optional[int] = [[self.lyrics_encoder.get(__snake_case , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Union[str, Any] ) -> str:
return list(__snake_case )
def lowerCamelCase__ ( self : Tuple , __snake_case : Tuple , __snake_case : int , __snake_case : str , **__snake_case : str ) -> Dict:
__magic_name__, __magic_name__, __magic_name__: int = self.prepare_for_tokenization(__snake_case , __snake_case , __snake_case )
__magic_name__: Optional[Any] = self._tokenize(__snake_case )
return artist, genre, lyrics
def lowerCamelCase__ ( self : List[Any] , __snake_case : str , __snake_case : str , __snake_case : str , __snake_case : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__magic_name__: int = artists[idx].lower()
__magic_name__: str = [genres[idx].lower()]
else:
__magic_name__: int = self._normalize(artists[idx] ) + """.v2"""
__magic_name__: Optional[int] = [
self._normalize(__snake_case ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__magic_name__: int = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
__magic_name__: str = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
__magic_name__: List[Any] = {vocab[index]: index + 1 for index in range(len(__snake_case ) )}
__magic_name__: List[Any] = 0
__magic_name__: List[str] = len(__snake_case ) + 1
__magic_name__: Tuple = self.vocab
__magic_name__: List[str] = {v: k for k, v in self.vocab.items()}
__magic_name__: Dict = """"""
else:
__magic_name__: int = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
__magic_name__: Dict = self._run_strip_accents(__snake_case )
__magic_name__: Tuple = lyrics.replace("""\\""" , """\n""" )
__magic_name__: Optional[int] = self.out_of_vocab.sub("""""" , __snake_case ), [], []
return artists, genres, lyrics
def lowerCamelCase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = unicodedata.normalize("""NFD""" , __snake_case )
__magic_name__: Union[str, Any] = []
for char in text:
__magic_name__: List[Any] = unicodedata.category(__snake_case )
if cat == "Mn":
continue
output.append(__snake_case )
return "".join(__snake_case )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : str ) -> str:
__magic_name__: List[Any] = (
[chr(__snake_case ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(__snake_case ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(__snake_case ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
__magic_name__: List[Any] = frozenset(__snake_case )
__magic_name__: List[str] = re.compile(R"""_+""" )
__magic_name__: int = """""".join([c if c in accepted else """_""" for c in text.lower()] )
__magic_name__: Union[str, Any] = pattern.sub("""_""" , __snake_case ).strip("""_""" )
return text
def lowerCamelCase__ ( self : List[str] , __snake_case : List[str] ) -> str:
return " ".join(__snake_case )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : int , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : bool = False ) -> Tuple:
# Convert to TensorType
if not isinstance(__snake_case , __snake_case ):
__magic_name__: int = TensorType(__snake_case )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
__magic_name__: int = tf.constant
__magic_name__: Tuple = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
__magic_name__: List[str] = torch.tensor
__magic_name__: List[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
__magic_name__: str = jnp.array
__magic_name__: int = _is_jax
else:
__magic_name__: Optional[Any] = np.asarray
__magic_name__: int = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__magic_name__: Any = [inputs]
if not is_tensor(__snake_case ):
__magic_name__: List[Any] = as_tensor(__snake_case )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self : Any , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int]="" , __snake_case : int="pt" ) -> BatchEncoding:
__magic_name__: List[str] = [0, 0, 0]
__magic_name__: Tuple = [artist] * len(self.version )
__magic_name__: int = [genres] * len(self.version )
__magic_name__, __magic_name__, __magic_name__: str = self.tokenize(__snake_case , __snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Union[str, Any] = self._convert_token_to_id(__snake_case , __snake_case , __snake_case )
__magic_name__: str = [-INFINITY] * len(full_tokens[-1] )
__magic_name__: List[str] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__snake_case )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        # Convert indices back to tokens using the decoder dictionaries.
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
[code_codestyle: 213]
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
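For illustration, a minimal construction of the config above (the keyword defaults mirror bert-base; `projection_dim` is the only DPR-specific addition):

# Minimal sketch: instantiate with defaults, overriding only the
# DPR-specific projection dimension.
config = DPRConfig(projection_dim=128)
print(config.hidden_size, config.projection_dim)  # 768 128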
[style_context_codestyle: 213 | label: 1]
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    # Calculate the Adler-32 checksum of the given string.
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
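A quick sanity check against the standard library, which implements the same algorithm (a minimal sketch; `zlib.adler32` operates on bytes, so the string is encoded first):

# Cross-check against zlib's Adler-32.
import zlib

text = "Algorithms"
assert adler32(text) == zlib.adler32(text.encode())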
[code_codestyle: 614]
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
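A hypothetical invocation of the script above (the script file name and paths are placeholders):

# Hypothetical shell invocation; script name and paths are placeholders:
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text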
[style_context_codestyle: 614 | label: 1]
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
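A short usage sketch of the processor (assumes network access to the Hub and the public `openai/clip-vit-base-patch32` checkpoint; `return_tensors="pt"` additionally requires torch):

# Usage sketch: pair one caption with one image.
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']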
[code_codestyle: 713]
def jaccard_similarity(set_a, set_b, alternative_union=False):
    # Finds the Jaccard similarity between two sets, lists or tuples.
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
[style_context_codestyle: 216 | label: 0]
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
[code_codestyle: 196]
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    # Convert a torch image tensor in [-1, 1] to a list of PIL images.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    # Convert a numpy image batch (channels-last, values in [0, 1]) to PIL images.
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
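A minimal sketch of `numpy_to_pil` on random data (channels-last, values in [0, 1]):

# A float image batch in [0, 1] becomes a list of PIL images.
import numpy as np

batch = np.random.rand(2, 64, 64, 3)  # (batch, height, width, channels)
pils = numpy_to_pil(batch)
print(len(pils), pils[0].size)  # 2 (64, 64)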
[style_context_codestyle: 588 | label: 0]
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=99 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=4 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=0.02 , ) -> List[str]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
lowerCAmelCase__ = 20
lowerCAmelCase__ = model_class_name(lowerCamelCase_ )
lowerCAmelCase__ = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase__ , lowerCAmelCase__ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase__ = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
lowerCAmelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase_ , )
lowerCAmelCase__ = model.decode(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
lowerCAmelCase__ = 20
lowerCAmelCase__ = model_class_name(lowerCamelCase_ )
lowerCAmelCase__ = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase__ , lowerCAmelCase__ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase__ = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
lowerCAmelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
lowerCAmelCase__ = model.decode(lowerCamelCase_ , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = model_class(lowerCamelCase_ )
@jax.jit
def encode_jitted(lowerCamelCase_ , lowerCamelCase_=None , **lowerCamelCase_ ):
return model.encode(input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase__ = encode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase__ = encode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase__ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
return model.decode(
decoder_input_ids=lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , encoder_outputs=lowerCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase__ = decode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase__ = decode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
lowerCAmelCase__ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase__ = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase__ = model(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
lowerCAmelCase__ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowerCAmelCase__ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=lowerCamelCase_ )
lowerCAmelCase__ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowerCAmelCase__ = ['''Sam''']
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , return_tensors='''jax''' )
lowerCAmelCase__ = model.generate(**lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = '''Sam is a great name. It means "sun" in Gaelic.'''
lowerCAmelCase__ = tokenizer.batch_decode(lowerCamelCase_ , **lowerCamelCase_ )
assert generated_txt[0].strip() == tgt_text
[code_codestyle: 98]
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    # A Krishnamurthy number equals the sum of the factorials of its digits.
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print('Program to check whether a number is a Krisnamurthy Number or not.')
    number = int(input('Enter number: ').strip())
    print(
        f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
    )
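The classic example is 145, since 1! + 4! + 5! = 1 + 24 + 120 = 145. A quick check with the functions above:

# 145 qualifies; 144 does not (1! + 4! + 4! = 49).
assert krishnamurthy(145)
assert not krishnamurthy(144)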
[style_context_codestyle: 98 | label: 1]
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE__ ( ) -> None:
lowercase__: Union[str, Any] = input('''Enter message: ''' )
lowercase__: List[Any] = int(input(F"""Enter key [2-{len(__A ) - 1}]: """ ) )
lowercase__: int = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowercase__: Union[str, Any] = encrypt_message(__A , __A )
elif mode.lower().startswith('''d''' ):
lowercase__: Dict = decrypt_message(__A , __A )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + '|'}""" )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: Union[str, Any] = [''''''] * key
for col in range(__A ):
lowercase__: Optional[Any] = col
while pointer < len(__A ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(__A )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: Tuple = math.ceil(len(__A ) / key )
lowercase__: Union[str, Any] = key
lowercase__: Optional[int] = (num_cols * num_rows) - len(__A )
lowercase__: List[Any] = [''''''] * num_cols
lowercase__: Dict = 0
lowercase__: List[str] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
lowercase__: Optional[int] = 0
row += 1
return "".join(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
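A round-trip sketch: decrypting with the same key restores the original message.

# Encrypt/decrypt round trip with the same key.
msg = "Common sense is not so common."
key = 8
cipher = encrypt_message(key, msg)
assert decrypt_message(key, cipher) == msg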
[code_codestyle: 586]
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '--accelerate-config_file',
            default=None,
            help='The accelerate config file to use for the default values in the launching script.',
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
def A__ ( self : Tuple ) -> int:
'''simple docstring'''
lowercase : Optional[Any] ='''not installed'''
if is_safetensors_available():
import safetensors
lowercase : List[Any] =safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
lowercase : List[str] =f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
lowercase : List[str] ='''not installed'''
lowercase : str ='''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowercase : List[str] =accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCAmelCase ):
lowercase : Dict =load_config_from_file(self._accelerate_config_file ).to_dict()
lowercase : Optional[int] =(
'''\n'''.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(UpperCAmelCase , UpperCAmelCase )
else f'\t{accelerate_config}'
)
lowercase : Optional[int] ='''not installed'''
lowercase : Optional[Any] ='''NA'''
if is_torch_available():
import torch
lowercase : List[Any] =torch.__version__
lowercase : Dict =torch.cuda.is_available()
lowercase : Optional[Any] ='''not installed'''
lowercase : List[str] ='''NA'''
if is_tf_available():
import tensorflow as tf
lowercase : List[Any] =tf.__version__
try:
# deprecated in v2.1
lowercase : Optional[Any] =tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowercase : Optional[int] =bool(tf.config.list_physical_devices('''GPU''' ) )
lowercase : Optional[Any] ='''not installed'''
lowercase : Optional[Any] ='''not installed'''
lowercase : Optional[Any] ='''not installed'''
lowercase : List[str] ='''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
lowercase : Dict =flax.__version__
lowercase : List[Any] =jax.__version__
lowercase : Any =jaxlib.__version__
lowercase : Tuple =jax.lib.xla_bridge.get_backend().platform
lowercase : str ={
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f'{safetensors_version}',
'''Accelerate version''': f'{accelerate_version}',
'''Accelerate config''': f'{accelerate_config_str}',
'''PyTorch version (GPU?)''': f'{pt_version} ({pt_cuda_available})',
'''Tensorflow version (GPU?)''': f'{tf_version} ({tf_cuda_available})',
'''Flax version (CPU?/GPU?/TPU?)''': f'{flax_version} ({jax_backend})',
'''Jax version''': f'{jax_version}',
'''JaxLib version''': f'{jaxlib_version}',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(UpperCAmelCase ) )
return info
@staticmethod
def A__ ( UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
[style_context_codestyle: 94 | label: 0]
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    # Apply Ohm's law (V = I * R): exactly one of the three arguments must be
    # 0, and its computed value is returned as a name/value pair.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
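Pass 0 for the unknown quantity and the function solves V = I * R for it:

# Solve for the missing electrical quantity.
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}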
[code_codestyle: 718]
def binary_multiply(a: int, b: int) -> int:
    # Multiply a and b with shift-and-add (Russian peasant multiplication).
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    # Multiply a and b modulo c, reducing each partial sum modulo c.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
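The shift-and-add loop agrees with ordinary multiplication, and the modular variant keeps intermediate sums small:

# Sanity checks for both variants (function names as reconstructed above).
assert binary_multiply(13, 11) == 143
assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7  # == 3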
[style_context_codestyle: 23 | label: 0]
class MaxFenwickTree:
    """Fenwick tree variant for point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only itself.
                self.tree[index] = value
            else:
                # Recompute the node's maximum over its whole interval.
                self.tree[index] = max(value, self.query(current_left_border, index), self.arr[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
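A short usage sketch (the class name is a reconstruction; indices are 0-based and `query`'s right bound is exclusive):

# Point updates followed by range-maximum queries.
tree = MaxFenwickTree(5)
tree.update(0, 10)
tree.update(2, 7)
print(tree.query(0, 5))  # 10
print(tree.query(1, 3))  # 7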
[code_codestyle: 53]
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
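# Example invocation (illustrative; the paths below are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin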
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed workflow run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the parameter name of the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the report artifacts of the last completed daily CI workflow run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
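# Hedged usage sketch (not part of the module): download and read the latest
# daily CI reports. The artifact name below is an example, not a guaranteed one.
# import os
# reports = get_last_daily_ci_reports(
#     artifact_names=["run_all_tests_gpu_test_reports"],
#     output_dir="ci_artifacts",
#     token=os.environ.get("GITHUB_TOKEN"),
# )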
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have fewer false positives than `zipfile.is_zipfile`,
    # which only checks for the magic number.
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
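# Hedged usage sketch (not part of the module): this reader backs the public
# `datasets.load_dataset("text", data_files=...)` path; calling it directly
# looks like the following, with "corpus.txt" as a placeholder file name.
# reader = TextDatasetReader("corpus.txt", split=NamedSplit("train"))
# dataset = reader.read()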
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save the TensorFlow model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(aws_config_map)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
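# Hedged usage sketch (not part of the module; the checkpoint paths are
# placeholders): two ControlNets whose residuals are summed at each step.
# controlnet_canny = ControlNetModel.from_pretrained("/path/to/controlnet_canny")
# controlnet_depth = ControlNetModel.from_pretrained("/path/to/controlnet_depth")
# multi = MultiControlNetModel([controlnet_canny, controlnet_depth])
# multi.save_pretrained("./mydirectory/controlnet")  # writes controlnet, controlnet_1, ...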
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack;
                # the `stack[-1] != "("` guard keeps "(" from being looked up in
                # the priority table (which would raise a KeyError)
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
_lowercase : int =input("""\nEnter an Infix Equation = """) # Input an Infix equation
_lowercase : Optional[Any] ="""""".join(Infix.split()) # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # Framework not provided - TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
A_ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
A_ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
A_ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Maximum sum of any k consecutive elements, via a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
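    # Deterministic worked example (added): for [1, 4, 2, 10, 23, 3, 1, 0, 20]
    # and k = 4, the best window is [4, 2, 10, 23] with sum 39.
    assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39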
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = "linear"
__UpperCamelCase: Tuple = "cosine"
__UpperCamelCase: Optional[int] = "cosine_with_restarts"
__UpperCamelCase: str = "polynomial"
__UpperCamelCase: int = "constant"
__UpperCamelCase: Any = "constant_with_warmup"
__UpperCamelCase: Optional[Any] = "piecewise_constant"
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int = -1 ) -> Any:
"""simple docstring"""
return LambdaLR(_UpperCAmelCase , lambda _UpperCAmelCase : 1 , last_epoch=_UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int = -1 ) -> Optional[int]:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1.0 , _UpperCAmelCase ) )
return 1.0
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , last_epoch=_UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : str , _UpperCAmelCase : int = -1 ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = {}
_UpperCAmelCase : Union[str, Any] = step_rules.split("," )
for rule_str in rule_list[:-1]:
_UpperCAmelCase , _UpperCAmelCase : Tuple = rule_str.split(":" )
_UpperCAmelCase : Dict = int(_UpperCAmelCase )
_UpperCAmelCase : int = float(_UpperCAmelCase )
_UpperCAmelCase : Dict = value
_UpperCAmelCase : List[str] = float(rule_list[-1] )
def create_rules_function(_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
def rule_func(_UpperCAmelCase : int ) -> float:
_UpperCAmelCase : Union[str, Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_UpperCAmelCase : Optional[Any] = create_rules_function(_UpperCAmelCase , _UpperCAmelCase )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , last_epoch=_UpperCAmelCase )
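# A hedged reading of the rules string (assuming the de-obfuscated diffusers semantics):
# step_rules="1:10,20:0.1,0.01" keeps a multiplier of 10 while step < 1, 0.1 while
# step < 20, and 0.01 from step 20 onwards.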
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=-1 ) -> Optional[int]:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float = 0.5 , _UpperCAmelCase : int = -1 ) -> Any:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : List[Any] ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
_UpperCAmelCase : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = -1 ) -> int:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
_UpperCAmelCase : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=1e-7 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : List[Any]=-1 ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Any = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_UpperCAmelCase : int = lr_init - lr_end
_UpperCAmelCase : Optional[Any] = num_training_steps - num_warmup_steps
_UpperCAmelCase : int = 1 - (current_step - num_warmup_steps) / decay_steps
_UpperCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
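# After warmup the schedule follows lr(t) = lr_end + (lr_init - lr_end) * (1 - (t - warmup) / (T - warmup)) ** power
# (with T = num_training_steps and warmup = num_warmup_steps), so power=1.0 reduces to a
# plain linear decay from lr_init down to lr_end.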
__SCREAMING_SNAKE_CASE : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def UpperCamelCase_ ( _UpperCAmelCase : Union[str, SchedulerType] , _UpperCAmelCase : Optimizer , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : int = -1 , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = SchedulerType(_UpperCAmelCase )
_UpperCAmelCase : str = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCAmelCase , last_epoch=_UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCAmelCase , step_rules=_UpperCAmelCase , last_epoch=_UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , last_epoch=_UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , num_cycles=_UpperCAmelCase , last_epoch=_UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , power=_UpperCAmelCase , last_epoch=_UpperCAmelCase , )
return schedule_func(
_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , last_epoch=_UpperCAmelCase )
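# A minimal usage sketch (hypothetical variable names; the entry point is the
# de-obfuscated `get_scheduler` above):
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1_000)
#   for _ in range(1_000):
#       optimizer.step()
#       scheduler.step()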
| 244
| 1
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : str = "cpu" , _lowerCAmelCase : Union[str, None] = None ) -> None:
UpperCAmelCase : Tuple = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCAmelCase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
UpperCAmelCase : Optional[int] = v.half()
if save_path is None: # overwrite src_path
UpperCAmelCase : Union[str, Any] = src_path
torch.save(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
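# Example invocation via python-fire (script and file names are illustrative):
#   python to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin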
| 528
|
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[str]:
UpperCAmelCase : int = tmp_path / '''cache'''
UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Any = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = tmp_path / '''cache'''
UpperCAmelCase : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : str = features.copy() if features else default_expected_features
UpperCAmelCase : Optional[Any] = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : List[Any] ) -> Dict:
with contextlib.closing(sqlitea.connect(_lowerCAmelCase ) ) as con:
UpperCAmelCase : Any = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Optional[int]:
UpperCAmelCase : Optional[int] = tmp_path / '''cache'''
UpperCAmelCase : int = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : Any = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase )
UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase )
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ) -> int:
UpperCAmelCase : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase : Any = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : List[str] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
UpperCAmelCase : List[str] = iter_sql_file(_lowerCAmelCase )
UpperCAmelCase : Any = iter_sql_file(_lowerCAmelCase )
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = tmp_path / '''cache'''
UpperCAmelCase : Tuple = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
with pytest.raises(_lowerCAmelCase ):
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
| 528
| 1
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_A : List[str] = pytest.mark.integration
@require_faiss
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE__ = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def lowercase_ ( self ):
'''simple docstring'''
import faiss
SCREAMING_SNAKE_CASE__ = self._create_dummy_dataset()
SCREAMING_SNAKE_CASE__ = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
SCREAMING_SNAKE_CASE__ = dset.add_faiss_index('''vecs''' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowercase_ ( self ):
'''simple docstring'''
import faiss
SCREAMING_SNAKE_CASE__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ):
'''simple docstring'''
import faiss
SCREAMING_SNAKE_CASE__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(A_ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase_ ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
SCREAMING_SNAKE_CASE__ = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
SCREAMING_SNAKE_CASE__ = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
SCREAMING_SNAKE_CASE__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
SCREAMING_SNAKE_CASE__ = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
import faiss
SCREAMING_SNAKE_CASE__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
SCREAMING_SNAKE_CASE__ = np.zeros(5 , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
SCREAMING_SNAKE_CASE__ = np.eye(5 , dtype=np.floataa )[::-1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
SCREAMING_SNAKE_CASE__ = [scores[0] for scores in total_scores]
SCREAMING_SNAKE_CASE__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def lowercase_ ( self ):
'''simple docstring'''
import faiss
SCREAMING_SNAKE_CASE__ = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
SCREAMING_SNAKE_CASE__ = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
SCREAMING_SNAKE_CASE__ = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def lowercase_ ( self ):
'''simple docstring'''
import faiss
SCREAMING_SNAKE_CASE__ = faiss.IndexFlat(5 )
SCREAMING_SNAKE_CASE__ = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase_ ( self ):
'''simple docstring'''
import faiss
SCREAMING_SNAKE_CASE__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
SCREAMING_SNAKE_CASE__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
SCREAMING_SNAKE_CASE__ = np.zeros(5 , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def __snake_case ( lowerCAmelCase_ ) -> List[str]:
import faiss
SCREAMING_SNAKE_CASE__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
SCREAMING_SNAKE_CASE__ = '''index.faiss'''
SCREAMING_SNAKE_CASE__ = f'''mock://{index_name}'''
index.save(lowerCAmelCase_ , storage_options=mockfs.storage_options )
SCREAMING_SNAKE_CASE__ = FaissIndex.load(lowerCAmelCase_ , storage_options=mockfs.storage_options )
SCREAMING_SNAKE_CASE__ = np.zeros(5 , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search(lowerCAmelCase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
SCREAMING_SNAKE_CASE__ = Elasticsearch()
SCREAMING_SNAKE_CASE__ = {'''acknowledged''': True}
SCREAMING_SNAKE_CASE__ = ElasticSearchIndex(es_client=A_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
SCREAMING_SNAKE_CASE__ = '''foo'''
SCREAMING_SNAKE_CASE__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search(A_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
SCREAMING_SNAKE_CASE__ = '''foo'''
SCREAMING_SNAKE_CASE__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search(A_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
SCREAMING_SNAKE_CASE__ = ['''foo''', '''bar''', '''foobar''']
SCREAMING_SNAKE_CASE__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search_batch(A_ )
SCREAMING_SNAKE_CASE__ = [scores[0] for scores in total_scores]
SCREAMING_SNAKE_CASE__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
# batched queries with timeout
SCREAMING_SNAKE_CASE__ = ['''foo''', '''bar''', '''foobar''']
SCREAMING_SNAKE_CASE__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = index.search_batch(A_ , request_timeout=30 )
SCREAMING_SNAKE_CASE__ = [scores[0] for scores in total_scores]
SCREAMING_SNAKE_CASE__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
| 100
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_A : List[str] = logging.get_logger(__name__)
_A : List[str] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
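# Maps fairseq parameter prefixes to transformers names; the "*" in a target name is a
# per-layer wildcard that is replaced with the actual layer index during conversion.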
_A : Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def __snake_case ( lowerCAmelCase_ ) -> Tuple:
SCREAMING_SNAKE_CASE__ = {}
with open(lowerCAmelCase_ , '''r''' ) as file:
for line_number, line in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = line.strip()
if line:
SCREAMING_SNAKE_CASE__ = line.split()
SCREAMING_SNAKE_CASE__ = line_number
SCREAMING_SNAKE_CASE__ = words[0]
SCREAMING_SNAKE_CASE__ = value
return result
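# The text file is expected to hold one label per non-empty line: each zero-based line
# number becomes a key mapped to that line's first whitespace-separated token, giving
# the id -> label mapping used for sequence classification below.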
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE__ = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ = hf_pointer
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE__ = value[0]
else:
SCREAMING_SNAKE_CASE__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
SCREAMING_SNAKE_CASE__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE__ = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ = '''.'''.join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE__ = key
SCREAMING_SNAKE_CASE__ = value if '''lm_head''' in full_key else value[0]
_A : Union[str, Any] = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Tuple:
SCREAMING_SNAKE_CASE__ = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE__ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE__ = mapped_key.replace('''*''' , lowerCAmelCase_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ = '''weight'''
else:
SCREAMING_SNAKE_CASE__ = None
if hf_dict is not None:
rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return is_used
return is_used
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE__ = True
else:
SCREAMING_SNAKE_CASE__ = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
SCREAMING_SNAKE_CASE__ = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE__ = name.split('''.''' )
SCREAMING_SNAKE_CASE__ = int(items[0] )
SCREAMING_SNAKE_CASE__ = int(items[1] )
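    # fairseq names conv weights like "conv_layers.<layer_id>.<type_id>.*", where
    # type_id 0 is the convolution itself and type_id 2 is its (layer/group) norm.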
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
                logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False ) -> int:
if config_path is not None:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig.from_pretrained(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE__ = read_txt_into_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = WavaVecaForSequenceClassification(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
feature_extractor.save_pretrained(lowerCAmelCase_ )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ = Dictionary.load(lowerCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ = target_dict.pad_index
SCREAMING_SNAKE_CASE__ = target_dict.bos_index
SCREAMING_SNAKE_CASE__ = target_dict.eos_index
SCREAMING_SNAKE_CASE__ = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ = os.path.join(lowerCAmelCase_ , '''vocab.json''' )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase_ ) )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = WavaVecaCTCTokenizer(
lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = True if config.feat_extract_norm == '''layer''' else False
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = WavaVecaForCTC(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = WavaVecaForPreTraining(lowerCAmelCase_ )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE__ = argparse.Namespace(task='''audio_pretraining''' )
SCREAMING_SNAKE_CASE__ = fairseq.tasks.setup_task(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = model[0].eval()
recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_A : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_A : List[str] = parser.parse_args()
_A : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 100
| 1
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCAmelCase = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
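    # In short: exactly one of MLM/CLM drives training, student and teacher types must be
    # compatible, and at least one loss weight has to be strictly positive.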
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
if args.student_type == "roberta":
_UpperCAmelCase = False
elif args.student_type == "gpt2":
_UpperCAmelCase = False
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
if args.student_type == "roberta":
_UpperCAmelCase = False
def __lowerCamelCase ( ) -> Tuple:
_UpperCAmelCase = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=_lowerCAmelCase , choices=["distilbert", "roberta", "gpt2"] , required=_lowerCAmelCase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_lowerCAmelCase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=_lowerCAmelCase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=_lowerCAmelCase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=_lowerCAmelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=_lowerCAmelCase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=_lowerCAmelCase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=_lowerCAmelCase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=_lowerCAmelCase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=_lowerCAmelCase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=_lowerCAmelCase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=_lowerCAmelCase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=_lowerCAmelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=_lowerCAmelCase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=_lowerCAmelCase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=_lowerCAmelCase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowerCAmelCase , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=_lowerCAmelCase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=_lowerCAmelCase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=_lowerCAmelCase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=_lowerCAmelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=_lowerCAmelCase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=_lowerCAmelCase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=_lowerCAmelCase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=_lowerCAmelCase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=_lowerCAmelCase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=_lowerCAmelCase , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=_lowerCAmelCase , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=_lowerCAmelCase , default=4_000 , help="Checkpoint interval." )
_UpperCAmelCase = parser.parse_args()
sanity_checks(_lowerCAmelCase )
# ARGS #
init_gpu_params(_lowerCAmelCase )
set_seed(_lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    " it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(_lowerCAmelCase ) , _lowerCAmelCase , indent=4 )
git_log(args.dump_path )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = MODEL_CLASSES[args.student_type]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_UpperCAmelCase = tokenizer.all_special_tokens.index(_lowerCAmelCase )
_UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_UpperCAmelCase = special_tok_ids
_UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , "rb" ) as fp:
_UpperCAmelCase = pickle.load(_lowerCAmelCase )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , "rb" ) as fp:
_UpperCAmelCase = pickle.load(_lowerCAmelCase )
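        # Word2vec-style frequency smoothing: counts are floored at 1 and raised to the
        # power -mlm_smoothing, so rarer tokens receive larger masking weights.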
_UpperCAmelCase = np.maximum(_lowerCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_UpperCAmelCase = 0.0 # do not predict special tokens
_UpperCAmelCase = torch.from_numpy(_lowerCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = LmSeqsDataset(params=_lowerCAmelCase , data=_lowerCAmelCase )
logger.info("Data loader created." )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
_UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=_lowerCAmelCase )
else:
_UpperCAmelCase = student_model_class(_lowerCAmelCase )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info("Student loaded." )
# TEACHER #
_UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_lowerCAmelCase )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_lowerCAmelCase , _lowerCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_lowerCAmelCase , _lowerCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_UpperCAmelCase = Distiller(
params=_lowerCAmelCase , dataset=_lowerCAmelCase , token_probs=_lowerCAmelCase , student=_lowerCAmelCase , teacher=_lowerCAmelCase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 129
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = "▁"
__lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__lowerCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__lowerCAmelCase = {
"facebook/xglm-564M": 2_0_4_8,
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : str="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Tuple="</s>" , __UpperCamelCase : Dict="<s>" , __UpperCamelCase : List[Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : List[str] , ):
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_UpperCAmelCase = 7
_UpperCAmelCase = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
_UpperCAmelCase = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model )
_UpperCAmelCase = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , __UpperCamelCase : str ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase__ ( self : int , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : str ):
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = "".join(__UpperCamelCase ).replace(__UpperCamelCase , " " ).strip()
return out_string
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 129
| 1
|
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class a__ ( _A , _A ):
snake_case__ = '''pixel_values'''
snake_case__ = False
snake_case__ = TimmBackboneConfig
def __init__( self : str ,a__ : Any ,**a__ : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(self ,'''timm''')
super().__init__(a_)
_lowerCAmelCase:List[str] = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''')
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.')
if hasattr(a_ ,'''out_features''') and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''')
_lowerCAmelCase:int = getattr(a_ ,'''use_pretrained_backbone''' ,a_)
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''')
# We just take the final layer by default. This matches the default for the transformers models.
_lowerCAmelCase:Tuple = config.out_indices if getattr(a_ ,'''out_indices''' ,a_) is not None else (-1,)
_lowerCAmelCase:Union[str, Any] = timm.create_model(
config.backbone ,pretrained=a_ ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=a_ ,**a_ ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_lowerCAmelCase:Dict = self._backbone.return_layers
_lowerCAmelCase:Optional[Any] = {layer["module"]: str(a_) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a_)
@classmethod
def __UpperCamelCase ( cls : Optional[Any] ,a__ : Dict ,*a__ : Union[str, Any] ,**a__ : int) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''vision''', '''timm'''])
from ...models.timm_backbone import TimmBackboneConfig
_lowerCAmelCase:Union[str, Any] = kwargs.pop('''config''' ,TimmBackboneConfig())
_lowerCAmelCase:str = kwargs.pop('''use_timm_backbone''' ,a_)
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''')
_lowerCAmelCase:str = kwargs.pop('''num_channels''' ,config.num_channels)
_lowerCAmelCase:Optional[Any] = kwargs.pop('''features_only''' ,config.features_only)
_lowerCAmelCase:Union[str, Any] = kwargs.pop('''use_pretrained_backbone''' ,config.use_pretrained_backbone)
_lowerCAmelCase:Dict = kwargs.pop('''out_indices''' ,config.out_indices)
_lowerCAmelCase:str = TimmBackboneConfig(
backbone=a_ ,num_channels=a_ ,features_only=a_ ,use_pretrained_backbone=a_ ,out_indices=a_ ,)
return super()._from_config(a_ ,**a_)
def __UpperCamelCase ( self : Optional[int] ,a__ : List[str]) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str] ,a__ : List[Any] ,a__ : Optional[int]=None ,a__ : str=None ,a__ : Optional[Any]=None ,**a__ : str) -> int:
"""simple docstring"""
_lowerCAmelCase:Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase:Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase:str = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''')
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_lowerCAmelCase:int = self._all_layers
_lowerCAmelCase:Optional[Any] = self._backbone(a_ ,**a_)
_lowerCAmelCase:List[Any] = self._return_layers
_lowerCAmelCase:List[Any] = tuple(hidden_states[i] for i in self.out_indices)
else:
_lowerCAmelCase:Tuple = self._backbone(a_ ,**a_)
_lowerCAmelCase:Any = None
_lowerCAmelCase:Optional[Any] = tuple(a_)
_lowerCAmelCase:Tuple = tuple(a_) if hidden_states is not None else None
if not return_dict:
_lowerCAmelCase:List[str] = (feature_maps,)
if output_hidden_states:
_lowerCAmelCase:Dict = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a_ ,hidden_states=a_ ,attentions=a_)
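# A hedged usage sketch (names follow the de-obfuscated transformers API, not this file):
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # tuple of Tensors, one per out_index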
| 227
|
'''simple docstring'''
import random
from typing import Any
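# Note: this variant swaps two uniformly random positions on each pass; the classical
# Fisher-Yates shuffle instead pairs index i with a random j <= i.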
def fisher_yates_shuffle( data : list ):
    for _ in range(len(data ) ):
        a = random.randint(0 ,len(data ) - 1 )
        b = random.randint(0 ,len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 525
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
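Once the script has run, the dump folder is a regular transformers checkpoint; a hedged loading sketch (the path is a placeholder):

# Hedged sketch: consuming the checkpoint produced by the conversion script above.
from transformers import SEWForCTC, Wav2Vec2Processor

model = SEWForCTC.from_pretrained("/path/to/pytorch_dump_folder")
processor = Wav2Vec2Processor.from_pretrained("/path/to/pytorch_dump_folder")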
| 721
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
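A hedged sketch of a test that relies on the autouse cache fixture above (the test name and assertion are illustrative):

import datasets.config

def test_caches_are_redirected():
    # The autouse fixture has already monkeypatched these paths to a pytest temp dir.
    assert "cache" in str(datasets.config.HF_DATASETS_CACHE)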
| 613
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        # A loop exists when the exact same node appears more than once while iterating.
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
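The `has_loop` property above needs O(n) extra memory for the `visited` list; Floyd's tortoise-and-hare detection does the same job in O(1) space. A hedged sketch against the `Node` class above (`has_loop_floyd` is an illustrative helper, not part of the file):

def has_loop_floyd(head: Node) -> bool:
    # The fast pointer advances two nodes per step; if a cycle exists,
    # it must eventually land on the slow pointer.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False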
| 8
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
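A short sketch of how the `attribute_map` above routes the generic config names onto XLM's own (the sizes are arbitrary):

config = XLMConfig(vocab_size=1000, emb_dim=256, n_layers=4, n_heads=4)
# attribute_map aliases: hidden_size resolves to emb_dim, num_hidden_layers to n_layers.
assert config.hidden_size == 256
assert config.num_hidden_layers == 4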
| 213
| 0
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
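A quick sanity check of the call above: P(X = 2) for n = 4, p = 0.75 is C(4, 2) * 0.75^2 * 0.25^2 = 6 * 0.5625 * 0.0625 = 0.2109375.

assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12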
| 69
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/rembert""": 256,
}
lowerCamelCase__ = """▁"""
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
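A hedged illustration of the sequence layout the three helpers above agree on (the ids are placeholders, not real vocabulary entries):

cls_id, sep_id = 0, 1               # placeholder special-token ids
ids_a, ids_b = [7, 8, 9], [4, 5]    # placeholder token ids
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]       # build_inputs_with_special_tokens
type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)  # create_token_type_ids_from_sequences
assert len(pair) == len(type_ids)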
| 69
| 1
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: Optional[int]) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
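A hedged usage sketch for the verification helpers above (the URL and checksum records are made up):

expected = {"https://example.com/data.txt": {"num_bytes": 3, "checksum": "abc"}}
recorded = {"https://example.com/data.txt": {"num_bytes": 3, "checksum": "abc"}}
verify_checksums(expected, recorded, verification_name="dataset source files")  # logs success
# With a differing checksum record, the same call would raise NonMatchingChecksumError.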
| 84
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Maps pixel values from [0, 255] to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
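A small numpy-only sketch of the color quantization step above: every RGB pixel is snapped to its nearest cluster index (the two-color palette is made up; `np` is the numpy import already at the top of the file):

palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=float)   # placeholder clusters
image = np.array([[[10, 10, 10], [250, 250, 250]]], dtype=float)  # shape (1, 2, 3), HWC
ids = color_quantize(image, palette)
print(ids)  # [0 1]: the dark pixel maps to cluster 0, the bright one to cluster 1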
| 489
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
_snake_case = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
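A hedged inference sketch for the classification model above (it assumes TensorFlow and access to the model hub; the checkpoint name comes from the archive list at the top of the file, and "cat.jpg" is a placeholder path):

from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=Image.open("cat.jpg"), return_tensors="tf")
logits = model(**inputs).logits
print(model.config.id2label[int(logits.numpy().argmax(-1)[0])])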
| 659
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
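A hedged sketch of what the lazy structure above buys: heavy framework submodules are imported only when an attribute is first touched (assumes a transformers install):

from transformers.models import blenderbot

# Resolving this attribute triggers the import of configuration_blenderbot only;
# modeling_blenderbot (and torch) stay untouched until something needs them.
config = blenderbot.BlenderbotConfig()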
| 659
| 1
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 229
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
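The reader above backs the public `Dataset.from_generator` entry point; a minimal usage sketch:

from datasets import Dataset

def gen():
    for i in range(3):
        yield {"idx": i}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'idx': 0}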
| 229
| 1
|
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_val, max_val):
    return (
        clamp(rect[0], min_val[0], max_val[0]),
        clamp(rect[1], min_val[1], max_val[1]),
        clamp(rect[2], min_val[0], max_val[0]),
        clamp(rect[3], min_val[1], max_val[1]),
    )
def _UpperCAmelCase ( __A : Any , __A : int , __A : Optional[int] ):
a_ : List[Any] = list(__snake_case )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
a_ : List[str] = clamp_rect(__snake_case , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Optional[Any] , __A : str , __A : Optional[int] ):
a_ : Tuple = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(__snake_case , (original_slice, 0) )
return result
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Tuple ):
a_ : Tuple = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
a_ : int = tile.crop(__snake_case )
return tile
def _UpperCAmelCase ( __A : Optional[int] , __A : Union[str, Any] ):
a_ : Any = n % d
return n - divisor
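# e.g. next_divisible(130, 8) == 128: the helper rounds n down to the nearest multiple of d.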
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None,
        num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None,
        tile_size=128, tile_border=32, original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image,
                    prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                    noise_level=noise_level, negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
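# Note on the design: the tiling above is a memory-saving strategy. Each 128x128
# tile is upscaled independently by the x4 upscaler, and the overlapping borders
# are alpha-blended with the linear-ramp transparency mask so the seams between
# tiles are not visible in the 4x output.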
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 707
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
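# Minimal usage sketch (the SentencePiece model path is illustrative):
#     tokenizer = BarthezTokenizer("sentencepiece.bpe.model")
#     tokenizer.tokenize("Bonjour le monde")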
| 666
| 0
|
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from nums without picking two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
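# e.g. maximum_non_adjacent_sum([1, 2, 4, 5]) == 7 (picking 2 and 5).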
| 169
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
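# Example invocation (script name and all paths are illustrative):
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert/model.ckpt-best \
#         --albert_config_file ./albert/albert_config.json \
#         --pytorch_dump_path ./albert/pytorch_model.bin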
| 23
| 0
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 721
|
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
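# Minimal usage sketch:
#     ds = DisjointSet([1, 1, 1])
#     ds.merge(0, 1)  # returns True; the union now has size 2
#     ds.max_set      # == 2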
| 188
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 496
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
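# Note: make_linear_from_emb ties the LM head to the decoder's shared embedding
# matrix by copying the embedding weights into a bias-free nn.Linear.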
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase :Optional[int] = parser.parse_args()
lowerCamelCase :Optional[int] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 487
| 0
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
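# Sanity check: for inputs (1, 1) the XOR qubit reads 0 and the AND qubit reads 1,
# so the dominant measured bitstring is "10" (carry=1, sum=0).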
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 507
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 507
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 77
|
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
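# e.g. binomial_coefficient(4, 2) == 6, catalan_number(3) == 5, and
# binary_tree_count(3) == 30 (5 shapes x 3! labelings).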
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 77
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
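# e.g. entropy(torch.tensor([0.5, 0.5])) == log(2) ~= 0.6931; a uniform
# distribution maximizes the entropy.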
def print_2d_tensor(tensor):
    """Log a 2D tensor, one layer per line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [v]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate([np.loadtxt(args.data_dir, dtype=np.int64)])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 348
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler,
            safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 348
| 1
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search on array[left:right]; return the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted list; return the index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted list; return the index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
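# Both variants need O(log3 n) comparisons on a sorted list; ranges smaller than
# `precision` are handed off to the linear scan in lin_search.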
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at position: {result1}")
        print(f"Recursive search: {target} found at position: {result2}")
    else:
        print("Not found")
| 569
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)

    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 567
| 0
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
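# Worked example (illustrative): "SKY" scores (ord("S") - 64) + (ord("K") - 64)
# + (ord("Y") - 64) = 19 + 11 + 25 = 55, and 55 = 0.5 * 10 * 11 is the 10th
# triangular number, so "SKY" is counted as a triangle word.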
| 701
|
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
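# Worked trace (illustrative): _modexpt(3, 5, 100) -> 5 is odd, so it returns
# (3 * _modexpt(3, 4, 100)) % 100; 4 is even, so _modexpt(3, 4, 100) squares
# _modexpt(3, 2, 100) = 9 to get 81; the final value is (3 * 81) % 100 = 43,
# which indeed equals 3**5 % 100.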
| 168
| 0
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
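# Expected output shape (illustrative): a list of (filled_sentence, probability,
# token) triples, e.g. [("Le camembert est délicieux :)", 0.49, "délicieux"), ...].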
| 103
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
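# Usage sketch (illustrative): this module normally backs the `accelerate test`
# subcommand, e.g. `accelerate test --config_file path/to/config.yaml`, which
# runs the bundled test_script.py through accelerate-launch.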
| 511
| 0
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
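# Usage sketch (illustrative): slowsort sorts the list in place.
#     >>> data = [5, 2, 4, 1, 3]
#     >>> slowsort(data)
#     >>> data
#     [1, 2, 3, 4, 5]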
| 243
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 243
| 1
|
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
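# Usage sketch (illustrative):
#     act = get_tf_activation("gelu")
#     y = act(tf.constant([-1.0, 0.0, 1.0]))  # elementwise GELU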
| 414
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
__a : List[Any] = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__a : Optional[int] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__a : List[str] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__a : int = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__a : Optional[Any] = os.path.join(get_home_dir() , 'models' )
__a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
    original_bort = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowerCamelCase_ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__a : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__a : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__a : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__a : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__a : BertSelfAttention = layer.attention.self
__a : Optional[int] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__a : str = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__a : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__a : str = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__a : Dict = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__a : str = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__a : BertSelfOutput = layer.attention.output
__a : Tuple = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
__a : Dict = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__a : BertIntermediate = layer.intermediate
__a : List[str] = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__a : Optional[Any] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__a : BertOutput = layer.output
__a : str = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__a : List[Any] = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__a : str = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__a : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 47
| 0
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 18
|
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
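# Expected values (worked by hand, illustrative): the shortest 1 -> 4 path is
# 1 -> 3 -> 4 with weight 5 + 6 = 11, and the shortest 0 -> 3 path is
# 0 -> 2 -> 3 with weight 9 + 7 = 16. Note that show_min returns these
# distances; wrap the calls in print() to display them.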
| 18
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _A , )
__UpperCAmelCase =kwargs.pop('''feature_extractor''')
__UpperCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(_A , _A)
def __call__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="max_length" , UpperCAmelCase="np" , **UpperCAmelCase):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''')
if text is not None:
if isinstance(_A , _A) or (isinstance(_A , _A) and not isinstance(text[0] , _A)):
__UpperCAmelCase =[self.tokenizer(_A , padding=_A , return_tensors=_A , **_A)]
elif isinstance(_A , _A) and isinstance(text[0] , _A):
__UpperCAmelCase =[]
# Maximum number of queries across batch
__UpperCAmelCase =max([len(_A) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(_A) != max_num_queries:
__UpperCAmelCase =t + [""" """] * (max_num_queries - len(_A))
__UpperCAmelCase =self.tokenizer(_A , padding=_A , return_tensors=_A , **_A)
encodings.append(_A)
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''')
if return_tensors == "np":
__UpperCAmelCase =np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0)
__UpperCAmelCase =np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCAmelCase =jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0)
__UpperCAmelCase =jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCAmelCase =torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0)
__UpperCAmelCase =torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCAmelCase =tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0)
__UpperCAmelCase =tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0)
else:
raise ValueError('''Target return tensor type could not be returned''')
__UpperCAmelCase =BatchEncoding()
__UpperCAmelCase =input_ids
__UpperCAmelCase =attention_mask
if query_images is not None:
__UpperCAmelCase =BatchEncoding()
__UpperCAmelCase =self.image_processor(
_A , return_tensors=_A , **_A).pixel_values
__UpperCAmelCase =query_pixel_values
if images is not None:
__UpperCAmelCase =self.image_processor(_A , return_tensors=_A , **_A)
if text is not None and images is not None:
__UpperCAmelCase =image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCAmelCase =image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A) , tensor_type=_A)
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return self.image_processor.post_process(*_A , **_A)
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*_A , **_A)
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*_A , **_A)
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A)
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A)
@property
def A__ (self):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , )
return self.image_processor_class
@property
def A__ (self):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _A , )
return self.image_processor
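# Usage sketch (illustrative):
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")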
| 132
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
def __init__( self , _A , _A ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_A , _A )
def __call__( self , *_A , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Any = kwargs.pop("""audio""" , _A )
_UpperCAmelCase : Tuple = kwargs.pop("""text""" , _A )
_UpperCAmelCase : Any = kwargs.pop("""text_target""" , _A )
_UpperCAmelCase : Optional[Any] = kwargs.pop("""audio_target""" , _A )
_UpperCAmelCase : Any = kwargs.pop("""sampling_rate""" , _A )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
_UpperCAmelCase : Optional[Any] = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
elif text is not None:
_UpperCAmelCase : List[str] = self.tokenizer(_A , **_A )
else:
_UpperCAmelCase : Optional[int] = None
if audio_target is not None:
_UpperCAmelCase : List[Any] = self.feature_extractor(audio_target=_A , *_A , sampling_rate=_A , **_A )
_UpperCAmelCase : Union[str, Any] = targets["""input_values"""]
elif text_target is not None:
_UpperCAmelCase : Optional[int] = self.tokenizer(_A , **_A )
_UpperCAmelCase : Union[str, Any] = targets["""input_ids"""]
else:
_UpperCAmelCase : List[Any] = None
if inputs is None:
return targets
if targets is not None:
_UpperCAmelCase : List[str] = labels
_UpperCAmelCase : List[str] = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
_UpperCAmelCase : Optional[int] = decoder_attention_mask
return inputs
def __snake_case ( self , *_A , **_A ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = kwargs.pop("""input_values""" , _A )
_UpperCAmelCase : List[Any] = kwargs.pop("""input_ids""" , _A )
_UpperCAmelCase : int = kwargs.pop("""labels""" , _A )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
_UpperCAmelCase : Optional[int] = self.feature_extractor.pad(_A , *_A , **_A )
elif input_ids is not None:
_UpperCAmelCase : Tuple = self.tokenizer.pad(_A , **_A )
else:
_UpperCAmelCase : Any = None
if labels is not None:
if "input_ids" in labels or (isinstance(_A , _A ) and "input_ids" in labels[0]):
_UpperCAmelCase : Optional[Any] = self.tokenizer.pad(_A , **_A )
_UpperCAmelCase : Optional[Any] = targets["""input_ids"""]
else:
_UpperCAmelCase : List[Any] = self.feature_extractor.feature_size
_UpperCAmelCase : Tuple = self.feature_extractor.num_mel_bins
_UpperCAmelCase : List[str] = self.feature_extractor.pad(_A , *_A , **_A )
_UpperCAmelCase : List[Any] = feature_size_hack
_UpperCAmelCase : Dict = targets["""input_values"""]
else:
_UpperCAmelCase : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
_UpperCAmelCase : Union[str, Any] = labels
_UpperCAmelCase : Dict = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
_UpperCAmelCase : Optional[Any] = decoder_attention_mask
return inputs
def __snake_case ( self , *_A , **_A ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def __snake_case ( self , *_A , **_A ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
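# Usage sketch (illustrative):
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello, world!", return_tensors="pt")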
| 238
| 0
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=__lowerCamelCase , required=__lowerCamelCase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=__lowerCamelCase , required=__lowerCamelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=__lowerCamelCase , choices=["distilbert", "roberta", "gpt2"] , required=__lowerCamelCase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=__lowerCamelCase , type=__lowerCamelCase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=__lowerCamelCase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=__lowerCamelCase , required=__lowerCamelCase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=__lowerCamelCase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=__lowerCamelCase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=__lowerCamelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=__lowerCamelCase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=__lowerCamelCase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=__lowerCamelCase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.1_5 , type=__lowerCamelCase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=__lowerCamelCase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=__lowerCamelCase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=__lowerCamelCase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=__lowerCamelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=__lowerCamelCase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=__lowerCamelCase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=__lowerCamelCase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=__lowerCamelCase , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.0_5 , type=__lowerCamelCase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=__lowerCamelCase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5e-4 , type=__lowerCamelCase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1e-6 , type=__lowerCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=__lowerCamelCase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.0_2 , type=__lowerCamelCase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=__lowerCamelCase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=__lowerCamelCase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=__lowerCamelCase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=__lowerCamelCase , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=__lowerCamelCase , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=__lowerCamelCase , default=4_000 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    " it. Use `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
with open(args.data_file , "rb" ) as fp:
        data = pickle.load(fp)
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , "rb" ) as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("Data loader created." )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info("Student loaded." )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 461
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def __a ( __lowerCamelCase : str , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
lowercase_ = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowercase_ = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
lowercase_ = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
lowercase_ = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
lowercase_ = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
lowercase_ = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
lowercase_ = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
lowercase_ = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
lowercase_ = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
lowercase_ = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
lowercase_ = key.replace("image_encoder.module" , "flava.image_model" )
lowercase_ = key.replace("text_encoder.module" , "flava.text_model" )
lowercase_ = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
lowercase_ = key.replace("mm_encoder.module" , "flava.multimodal_model" )
lowercase_ = key.replace("text_projection" , "flava.text_projection" )
lowercase_ = key.replace("image_projection" , "flava.image_projection" )
lowercase_ = value.float()
for key, value in codebook_state_dict.items():
lowercase_ = value
return upgrade
@torch.no_grad()
def __a ( __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=None ) -> Optional[int]:
'''simple docstring'''
if config_path is not None:
lowercase_ = FlavaConfig.from_pretrained(__lowerCamelCase )
else:
lowercase_ = FlavaConfig()
lowercase_ = FlavaForPreTraining(__lowerCamelCase ).eval()
lowercase_ = convert_dalle_checkpoint(__lowerCamelCase , __lowerCamelCase , save_checkpoint=__lowerCamelCase )
if os.path.exists(__lowerCamelCase ):
lowercase_ = torch.load(__lowerCamelCase , map_location="cpu" )
else:
lowercase_ = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="cpu" )
lowercase_ = upgrade_state_dict(__lowerCamelCase , __lowerCamelCase )
hf_model.load_state_dict(__lowerCamelCase )
lowercase_ = hf_model.state_dict()
lowercase_ = count_parameters(__lowerCamelCase )
lowercase_ = count_parameters(__lowerCamelCase ) + count_parameters(__lowerCamelCase )
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 )
hf_model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowerCAmelCase_ : List[Any] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
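# Example invocation (the script filename and paths are illustrative):
# python convert_flava_original_checkpoint_to_pytorch.py \
#     --checkpoint_path flava_full.pt --codebook_path flava_codebook.pt --pytorch_dump_folder_path ./flava-hf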
| 461
| 1
|
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating between the sources."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
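# Usage sketch with toy map-style datasets (uses the public `datasets` API):
#
#     from datasets import Dataset, interleave_datasets
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     # With no probabilities, examples alternate between sources until one is exhausted:
#     interleave_datasets([d1, d2])["a"]  # -> [0, 10, 1, 11, 2, 12]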
| 54
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
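# Worked example (the input mimics a pytest stats line):
# handle_test_results("=== 2 failed, 10 passed in 0:01:30 ===") -> (2, 10, "0:01:30")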
def extract_first_line_failure(failures_short_lines):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            failure = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[failure] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self.thread_ts = None  # set by post(); needed so post_reply() can detect a missing post

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
    @property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 54
| 1
|
"""Speech processor class for Speech2Text: wraps a feature extractor and a tokenizer into a single processor."""

import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
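# Usage sketch (the checkpoint id and a 16 kHz float waveform are illustrative):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")   # -> input_features
# batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript")    # also adds "labels"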
| 223
|
import os


def solution():
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
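# Worked example from the problem statement: COLIN scores 3 + 15 + 12 + 9 + 14 = 53,
# and as the 938th name in the sorted list it contributes 938 * 53 = 49714 to the total.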
| 223
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__A = logging.get_logger(__name__)
class snake_case ( __snake_case ):
def __init__( self : str , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str])-> None:
'''simple docstring'''
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__)
| 346
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__A = object()
# For specifying empty leaf dict `{}`
__A = object()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
__lowerCAmelCase: Dict = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE ) + 1 ):
__lowerCAmelCase: Tuple = [x.match(__SCREAMING_SNAKE_CASE ) for x, y in zip(__SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(__SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for rule, replacement in rules:
if _match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ) -> str:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , __SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , __SCREAMING_SNAKE_CASE )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , __SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , __SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( __SCREAMING_SNAKE_CASE ) -> List[str]:
__lowerCAmelCase: Any = _get_partition_rules()
__lowerCAmelCase: List[Any] = _replacement_rules(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = {k: _unmatched for k in flatten_dict(__SCREAMING_SNAKE_CASE )}
__lowerCAmelCase: Any = {k: replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__SCREAMING_SNAKE_CASE ) )
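# _match semantics sketch: a rule is a sequence of regexes that must match a contiguous
# window of the flattened parameter key, so for example:
# _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))  # -> True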
| 346
| 1
|
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
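# Sanity check of the RSA relationship with textbook toy numbers (NOT secure):
# p = 61, q = 53 -> n = 3233; e = 17 and d = 2753 are modular inverses mod (p - 1) * (q - 1),
# so any message m < n round-trips: pow(pow(m, 17, 3233), 2753, 3233) == m.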
| 594
|
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 594
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__UpperCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_lowerCAmelCase ):
return ext
raise Exception(
F'Unable to determine file format from file extension {path}. '
F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
lowerCAmelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
lowerCAmelCase = PipelineDataFormat.from_str(
format=_lowerCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_lowerCAmelCase , _lowerCAmelCase )
class a ( _a ):
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = nlp
lowerCAmelCase = reader
@staticmethod
def UpperCamelCase__ ( _snake_case ):
"""simple docstring"""
lowerCAmelCase = parser.add_parser('run' , help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
run_parser.add_argument('--input' , type=snake_case_ , help='Path to the file to use for inference' )
run_parser.add_argument('--output' , type=snake_case_ , help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' , type=snake_case_ , help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' , type=snake_case_ , help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' , type=snake_case_ , help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' , type=snake_case_ , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=snake_case_ , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=snake_case_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
run_parser.set_defaults(func=snake_case_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._nlp, []
for entry in self._reader:
lowerCAmelCase = nlp(**snake_case_ ) if self._reader.is_multi_columns else nlp(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
outputs.append(snake_case_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase = self._reader.save_binary(snake_case_ )
logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(snake_case_ )
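# Example invocation (paths and task name are illustrative):
# transformers-cli run --task text-classification --input data.csv --column text --output out.json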
| 4
|
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """Wraps a DeepSpeed config dict (or a path / base64-encoded string pointing to one) for quick querying."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine, used to follow a conventional training loop."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a deepspeed optimizer."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a deepspeed scheduler."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Dummy optimizer placeholder; the real optimizer settings come from the DeepSpeed config file."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Dummy scheduler placeholder; the real scheduler settings come from the DeepSpeed config file."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
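# Usage sketch with a toy config dict (keys follow the DeepSpeed JSON schema):
# ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
# ds_config.get_value("zero_optimization.stage")  # -> 3
# ds_config.is_zero3(), ds_config.is_offload()    # -> (True, True)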
| 374
| 0
|
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 572
|
def harmonic_series(n_term: str) -> list:
    """Returns the harmonic series ['1', '1/2', '1/3', ..., '1/n'] for a given n."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
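# Worked example:
# harmonic_series("4") -> ['1', '1/2', '1/3', '1/4']; summing 1 + 1/2 + 1/3 + 1/4 gives H_4 = 25/12.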
| 572
| 1
|
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowercase__ :
__UpperCAmelCase = 42
__UpperCAmelCase = None
__UpperCAmelCase = None
def _snake_case ( __snake_case : TreeNode | None ):
"""simple docstring"""
def is_valid_tree(__snake_case : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(__snake_case , __snake_case ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__snake_case ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
__snake_case : TreeNode | None , __snake_case : float , __snake_case : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , __snake_case , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , __snake_case )
)
return is_binary_search_tree_recursive_check(__snake_case , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
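# A quick usage sketch:
# root = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))
# is_binary_search_tree(root)  # -> True (1 < 2 < 3, so the ordering invariant holds)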
| 88
|
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE : List[str] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 294
| 0
|
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 10 , _lowerCamelCase = 22 ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = range(1 , _lowerCamelCase )
_lowerCamelCase : Tuple = range(1 , _lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
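# For example, 8 ** 3 = 512 is a 3-digit cube, so it is counted, while 10 ** 3 = 1000 has
# 4 digits and is not; bases >= 10 can never qualify since 10 ** n always has n + 1 digits.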
| 386
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
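# Example invocation (hypothetical script name and output paths):
#   python parse_dpr_retrieval_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set nq-dev.questions --gold_data_path nq-dev.gold_titles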
| 386
| 1
|
'''simple docstring'''
class Graph:
    """A directed graph stored as an adjacency list, traversed with a recursive DFS."""

    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
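# Complexity sketch: the recursive DFS marks each vertex once and scans each
# adjacency list once, so it runs in O(V + E) time with O(V) extra space.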
| 161
|
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    base_url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(base_url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
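# Note: this scraper assumes IMDb's older server-rendered chart markup; the page has
# since moved to script-rendered JSON, so the selectors above may return empty results.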
| 539
| 0
|
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return the torch activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 716
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
snake_case = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph supporting both directed and undirected edges."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex; then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
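# Usage sketch (illustrative, not part of the original module):
#   g = GraphAdjacencyList[int](directed=False)
#   g.add_edge(0, 1).add_edge(1, 2)   # add_edge returns self, so calls can chain
#   print(g)                          # {0: [1], 1: [0, 2], 2: [1]}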
| 587
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
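# Note: swapping sys.modules[__name__] for a _LazyModule defers the heavy torch/TF
# imports declared above until the corresponding attribute is first accessed.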
| 564
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 564
| 1
|
def interpolation_search(sorted_collection: list, item: int):
    """Search ``item`` in an ascending sorted collection, probing by interpolation."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
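# Worked example of the probe formula on [10, 30, 40, 45, 50, 66, 77, 93] with item 67:
# point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4, so the first probe lands
# on index 4 (value 50) and the search continues in the right half of the list.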
def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Recursive variant; ``left`` and ``right`` bound the slice still under search."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection: list) -> bool:
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
A_ :Dict = 0
if debug == 1:
A_ :List[str] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
A_ :Union[str, Any] = 67
A_ :str = interpolation_search(collection, target)
if result is not None:
print(f"{target} found at positions: {result}")
else:
print('''Not found''')
| 721
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    base_url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(base_url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 154
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract MFSC features for a single (unbatched) waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
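# Minimal usage sketch (illustrative values):
#   extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16_000, padding_value=0.0)
#   batch = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#   batch["input_features"].shape   # -> (1, num_frames, 80)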
| 196
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 196
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
| 712
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
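# Sketch: get_image_processor_config only downloads and parses preprocessor_config.json,
# e.g. get_image_processor_config("openai/clip-vit-base-patch32") returns a plain dict.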
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : int ) -> Tuple:
'''simple docstring'''
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate one of the image processor classes of the library from a pretrained model."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """
        Register a new image processor class for a given config class.
        """
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
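# A minimal usage sketch (standard `transformers` API; the checkpoint name below is
# only an illustrative example, nothing in the file above depends on it):
#
#   from transformers import AutoImageProcessor
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=image, return_tensors="pt")  # `image` is a PIL image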
| 411
| 0
|
"""simple docstring"""
from __future__ import annotations
import requests
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> dict:
'''simple docstring'''
lowercase = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(lowerCAmelCase__ ).json()
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 1_0 ) -> list[dict]:
'''simple docstring'''
lowercase = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
lowercase = requests.get(lowerCAmelCase__ ).json()[:max_stories]
return [get_hackernews_story(lowerCAmelCase__ ) for story_id in story_ids]
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 1_0 ) -> str:
'''simple docstring'''
lowercase = hackernews_top_stories(lowerCAmelCase__ )
return "\n".join("""* [{title}]({url})""".format(**lowerCAmelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
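    # Hypothetical output (the real front page changes constantly):
    #
    #   * [Show HN: An example story](https://example.com/story)
    #   * [Another front-page story](https://example.com/another)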
| 359
|
"""simple docstring"""
from math import pow, sqrt
def UpperCAmelCase__ ( *lowerCAmelCase__ :float ) -> bool:
'''simple docstring'''
lowercase = len(lowerCAmelCase__ ) > 0 and all(value > 0.0 for value in values )
return result
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError("""Input Error: Molar mass values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
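# A quick numeric sanity check (textbook molar masses for H2 and O2; the expected
# value is approximate):
if __name__ == "__main__":
    # Hydrogen effuses roughly 4x faster than oxygen: sqrt(32.00 / 2.016) ~= 3.98
    print(effusion_ratio(2.016, 32.00))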
| 359
| 1
|
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning registry so each test triggers the warning again."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the huggingface_hub client with a stub that lists a few fake metrics."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 709
|
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """
    Decide whether some subset of `arr` sums to `required_sum`, using
    bottom-up dynamic programming over (prefix length, target sum).

    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 9)
    True
    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 30)
    False
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for every prefix of arr, a sum of zero can be formed by taking no elements
    for i in range(arr_len + 1):
        subset[i][0] = True

    # a positive sum cannot be formed from the empty prefix
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
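    # Extra sanity check beyond the doctests: 11 is in the list, and 1 + 5 + 5 == 11.
    print(is_subset_sum([1, 5, 11, 5], 11))  # True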
| 638
| 0
|
""" Testing suite for the PyTorch MaskFormer model. """
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # unlike the original implementation, we set requires_grad=True on inputs_embeds (line 2152)
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
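# A hypothetical follow-up (not exercised by the tests above): the image processor
# can turn the raw query logits into a per-pixel segmentation map, e.g.
#
#   semantic_map = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[(384, 384)]
#   )[0]  # -> (height, width) tensor of class ids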
| 18
|
"""Exact Match metric."""
import re
import string

import numpy as np

import datasets


_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"

_CITATION = "\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 18
| 1
|
"""PyTorch PoolFormer model."""

import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input, drop_prob=0.0, training=False):
    """
    Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
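# Illustrative sketch (not part of the model code): with drop_prob=0.5 in training
# mode, roughly half of the samples in a batch are zeroed out and the survivors are
# rescaled by 1/keep_prob, so the expected activation is preserved:
#
#   >>> x = torch.ones(8, 3, 4, 4)
#   >>> out = drop_path(x, drop_prob=0.5, training=True)
#   >>> sorted(out.unique().tolist())  # doctest: +SKIP
#   [0.0, 2.0]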
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings with a strided convolution, followed by an optional norm layer.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group normalization with 1 group. Input: tensor of shape [batch_size, num_channels, height, width].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # token mixing by average pooling; subtracting the input turns it into a residual delta
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
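# A minimal inference sketch (standard `transformers` usage; assumes the public
# "sail/poolformer_s12" checkpoint and a PIL `image` object are available):
#
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])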
| 713
|
import inspect
import os

import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed

import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
    AccelerateTestCase,
    TempDirTestCase,
    execute_subprocess_async,
    require_cuda,
    require_fsdp,
    require_multi_gpu,
    slow,
)
from accelerate.utils.constants import (
    FSDP_AUTO_WRAP_POLICY,
    FSDP_BACKWARD_PREFETCH,
    FSDP_SHARDING_STRATEGY,
    FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment


set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
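# A minimal sketch of the configuration surface exercised above (assumes a CUDA
# machine with `accelerate` installed; the env var names mirror the mocked ones):
#
#   import os
#   os.environ.update(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1",
#                     MASTER_ADDR="localhost", MASTER_PORT="10999",
#                     RANK="0", LOCAL_RANK="0", WORLD_SIZE="1")
#   from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
#   print(FullyShardedDataParallelPlugin().sharding_strategy)  # ShardingStrategy.FULL_SHARD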
| 242
| 0
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> Tuple:
A__ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
A__ = torch.manual_seed(0 )
A__ = pipe.dual_guided(
prompt="first prompt" , image=snake_case__ , text_to_image_strength=0.7_5 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
A__ = VersatileDiffusionPipeline.from_pretrained(snake_case__ , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A__ = generator.manual_seed(0 )
A__ = pipe.dual_guided(
prompt="first prompt" , image=snake_case__ , text_to_image_strength=0.7_5 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def snake_case__ ( self ) -> Dict:
A__ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A__ = "cyberpunk 2077"
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
A__ = torch.manual_seed(0 )
A__ = pipe.dual_guided(
prompt=snake_case__ , image=snake_case__ , text_to_image_strength=0.7_5 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
A__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
A__ = "A painting of a squirrel eating a burger "
A__ = torch.manual_seed(0 )
A__ = pipe.text_to_image(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
A__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
A__ = pipe.image_variation(snake_case__ , generator=snake_case__ , output_type="numpy" ).images
A__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 104
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
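
# The three full-loop tests above all run the same canonical denoising loop.
# A minimal sketch of that pattern (`model` is a stand-in; any noise-predicting
# UNet with the same call signature would do):
#
#   scheduler = KDPM2DiscreteScheduler(**config)
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       model_output = model(model_input, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample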
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path (e.g. "encoder.layers.3.attention.k_proj").
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    # The adapter layer index, if one is present in the weight name.
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    # Build an untied linear projection sharing the embedding matrix.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whether to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
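
# Example invocation. The script filename and the paths are placeholders; only
# the flags are taken from the argparse definition above:
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./converted-model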
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent, returned as an int
    whose decimal digits are the binary digits (e.g. "AC" -> 10101100).
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    # int("") would raise, so handle the zero input explicitly.
    if not bin_str:
        return 0

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
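
# Worked examples (verified by hand: 0xAC = 172 = 0b10101100, 0x9A4 = 2468):
#
#   >>> hex_to_bin("AC")
#   10101100
#   >>> hex_to_bin("9A4")
#   100110100100
#   >>> hex_to_bin("-5")
#   -101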
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], preserving aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
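
# A minimal usage sketch of the image processor defined above. The filename is
# an illustrative assumption; with the defaults shown the output is resized,
# center-cropped to 224x224, rescaled to [0, 1], and normalized with the
# OpenAI CLIP mean/std.
#
#   from PIL import Image
#   processor = CLIPImageProcessor(size={"shortest_edge": 224})
#   batch = processor.preprocess(Image.open("cat.png"), return_tensors="pt")
#   pixel_values = batch["pixel_values"]  # shape (1, 3, 224, 224)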
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
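
# The past-key-values check above follows a generic pattern worth naming: run
# the decoder once over the full sequence, then run only the newest token with
# the cached states, and require the two answers to agree. A sketch (`model` is
# any causal decoder honoring the HF `past_key_values` contract):
#
#   past = model(ids, use_cache=True)["past_key_values"]
#   full = model(torch.cat([ids, next_token], dim=-1))["last_hidden_state"]
#   cached = model(next_token, past_key_values=past)["last_hidden_state"]
#   assert torch.allclose(full[:, -1], cached[:, 0], atol=1e-3)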
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the raw JSON record for an Open Library olid, e.g. 'isbn/0140328726'."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
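
# Example (network access required; the olid below is the function's own
# default value):
#
#   book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   print(book["Title"], "-", book["Authors"])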
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldmad_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
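
# A minimal sketch of using the pipeline the tests above cover. The checkpoint
# id "Intel/ldm3d" is taken from the tests; the prompt is illustrative.
#
#   pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#   output = pipe("a photograph of an astronaut riding a horse")
#   rgb, depth = output.rgb, output.depth  # aligned RGB image + depth map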
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left : right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
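
# The recursion halves the index range on each call, giving the familiar
# divide-and-conquer recurrence T(n) = 2 * T(n / 2) + O(1) = O(n): every element
# is still visited exactly once; only the call pattern differs from a loop.
#
#   >>> find_max([1, 5, 3, 9, 2], 0, 4)
#   9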
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs matching the query from the Giphy API."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
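
# A slightly more robust variant of the request above, a sketch that lets
# requests do the query-string encoding instead of hand-building the URL
# (query and api_key mirror get_gifs' parameters):
#
#   response = requests.get(
#       "https://api.giphy.com/v1/gifs/search",
#       params={"q": query, "api_key": api_key},
#   )
#   urls = [gif["url"] for gif in response.json()["data"]]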
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
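
# The module above follows the transformers lazy-import convention: only the
# import structure is built eagerly, and _LazyModule swaps itself into
# sys.modules so that submodules load on first attribute access. In miniature,
# the same trick looks like this (a sketch, not the real _LazyModule class):
#
#   import importlib, sys, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._modules = import_structure  # {submodule: [exported names]}
#
#       def __getattr__(self, item):
#           for submodule, names in self._modules.items():
#               if item in names:
#                   module = importlib.import_module(f"{self.__name__}.{submodule}")
#                   return getattr(module, item)
#           raise AttributeError(item)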
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Optional[int] = None
lowercase_ : Optional[int] = 20
lowercase_ : Union[str, Any] = self._get_uniform_logits(batch_size=2, length=snake_case__ )
# tweak scores to not be uniform anymore
lowercase_ : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase_ : Any = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase_ : str = jax.nn.softmax(snake_case__, axis=-1 )
lowercase_ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase_ : List[Any] = jax.nn.softmax(temp_dist_warper_sharper(snake_case__, scores.copy(), cur_len=snake_case__ ), axis=-1 )
lowercase_ : Union[str, Any] = jax.nn.softmax(temp_dist_warper_smoother(snake_case__, scores.copy(), cur_len=snake_case__ ), axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min() )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Any = None
lowercase_ : List[str] = 10
lowercase_ : int = 2
# create ramp distribution
lowercase_ : Any = np.broadcast_to(np.arange(snake_case__ )[None, :], (batch_size, vocab_size) ).copy()
lowercase_ : Any = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase_ : int = FlaxTopKLogitsWarper(3 )
lowercase_ : Union[str, Any] = top_k_warp(snake_case__, snake_case__, cur_len=snake_case__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist(), 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist(), 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase_ : Optional[int] = 5
lowercase_ : List[Any] = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3 )
lowercase_ : Union[str, Any] = np.broadcast_to(np.arange(snake_case__ )[None, :], (batch_size, length) ).copy()
lowercase_ : Tuple = top_k_warp_safety_check(snake_case__, snake_case__, cur_len=snake_case__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist(), [2, 2] )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
lowercase_ : List[Any] = None
lowercase_ : Dict = 10
lowercase_ : Optional[int] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase_ : Dict = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase_ : Any = FlaxTopPLogitsWarper(0.8 )
lowercase_ : int = np.exp(top_p_warp(snake_case__, snake_case__, cur_len=snake_case__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase_ : List[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(snake_case__, snake_case__, atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowercase_ : Union[str, Any] = np.broadcast_to(np.arange(snake_case__ )[None, :], (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase_ : Optional[Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowercase_ : Optional[int] = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0 )
lowercase_ : Optional[Any] = top_p_warp(snake_case__, snake_case__, cur_len=snake_case__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist(), [3, 2] )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : List[str] = 20
lowercase_ : Any = 4
lowercase_ : List[Any] = 0
lowercase_ : int = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=snake_case__ )
# check that min length is applied at length 5
lowercase_ : Any = ids_tensor((batch_size, 20), vocab_size=20 )
lowercase_ : Any = 5
lowercase_ : Any = self._get_uniform_logits(snake_case__, snake_case__ )
lowercase_ : List[str] = min_dist_processor(snake_case__, snake_case__, cur_len=snake_case__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowercase_ : Any = self._get_uniform_logits(snake_case__, snake_case__ )
lowercase_ : Optional[Any] = 15
lowercase_ : Union[str, Any] = min_dist_processor(snake_case__, snake_case__, cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case__ ( self ) -> Any:
"""simple docstring"""
lowercase_ : Any = 20
lowercase_ : Dict = 4
lowercase_ : List[str] = 0
lowercase_ : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
# check that all scores are -inf except the bos_token_id score
lowercase_ : Dict = ids_tensor((batch_size, 1), vocab_size=20 )
lowercase_ : Any = 1
lowercase_ : Any = self._get_uniform_logits(snake_case__, snake_case__ )
lowercase_ : List[str] = logits_processor(snake_case__, snake_case__, cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase_ : int = 3
lowercase_ : Optional[Any] = self._get_uniform_logits(snake_case__, snake_case__ )
lowercase_ : Optional[Any] = logits_processor(snake_case__, snake_case__, cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Union[str, Any] = 20
lowercase_ : Dict = 4
lowercase_ : str = 0
lowercase_ : Dict = 5
lowercase_ : int = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__, eos_token_id=snake_case__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase_ : Tuple = ids_tensor((batch_size, 4), vocab_size=20 )
lowercase_ : List[str] = 4
lowercase_ : List[Any] = self._get_uniform_logits(snake_case__, snake_case__ )
lowercase_ : Tuple = logits_processor(snake_case__, snake_case__, cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase_ : List[str] = 3
lowercase_ : Dict = self._get_uniform_logits(snake_case__, snake_case__ )
lowercase_ : Union[str, Any] = logits_processor(snake_case__, snake_case__, cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
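# Aside (added; not part of the test class above): a minimal self-contained sketch of
# the property both tests assert -- applying warpers one by one is equivalent to
# wrapping them in a FlaxLogitsProcessorList. The import path follows the transformers
# Flax generation API and may vary slightly across versions.
if __name__ == "__main__":
    import jax.numpy as jnp
    from transformers.generation import (
        FlaxLogitsProcessorList,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
    )

    demo_input_ids = jnp.zeros((2, 4), dtype=jnp.int32)
    demo_scores = jnp.ones((2, 10))
    temp = FlaxTemperatureLogitsWarper(temperature=0.5)
    top_k = FlaxTopKLogitsWarper(3)
    # chain by hand ...
    manual = top_k(demo_input_ids, temp(demo_input_ids, demo_scores, cur_len=4), cur_len=4)
    # ... versus the processor list
    listed = FlaxLogitsProcessorList([temp, top_k])(demo_input_ids, demo_scores, cur_len=4)
    assert jnp.allclose(manual, listed, atol=1e-3)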
| 458
|
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt ``message``; this variant subtracts the key letter from each message letter.

    >>> cipher_text("THE GERMAN ATTACK", generate_key("THE GERMAN ATTACK", "SECRET"))
    'BDC PAYUWL JPAIYI'
    """
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher += dictb[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt by adding the key letter back, inverting ``cipher_text``.

    >>> original_text("BDC PAYUWL JPAIYI", generate_key("THE GERMAN ATTACK", "SECRET"))
    'THE GERMAN ATTACK'
    """
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
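# Aside (added; not part of the original module): the key-stretching loop in
# ``generate_key`` can be written more idiomatically with ``itertools.cycle``;
# this sketch produces the same stretched key.
from itertools import cycle


def generate_key_cycled(message: str, key: str) -> str:
    # Pair each message character with the endlessly repeated key and keep the key side.
    return "".join(k for _, k in zip(message, cycle(key)))


assert generate_key_cycled("THE GERMAN ATTACK", "SECRET") == generate_key("THE GERMAN ATTACK", "SECRET")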
| 458
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional latent-diffusion generation with a VQ-VAE, a UNet and a DDIM scheduler."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
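# Aside (added; not from the original file): a minimal usage sketch for the pipeline
# above. "CompVis/ldm-celebahq-256" is a public checkpoint that matches this
# VQ-VAE + UNet + DDIM layout; treat the exact id and class name as assumptions.
if __name__ == "__main__":
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipe(batch_size=1, num_inference_steps=25).images[0]  # a PIL.Image
    image.save("ldm_sample.png")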
| 712
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self) -> None:
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self) -> None:
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
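# Aside (added; not part of the test suite): the kind of encoding the integration
# check above exercises, written out by hand. Requires network access to the Hub.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("moussaKam/mbarthez")
    print(tok.tokenize("Le transformeur est un modèle d'apprentissage profond."))
    print(tok("Le transformeur est un modèle d'apprentissage profond.").input_ids)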
| 106
| 0
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, image_processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=image_processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
    def test_small_model_tf(self):
        pass
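# Aside (added; not part of the test class): a minimal end-user sketch of the
# pipeline exercised above; it downloads weights from the Hub on first run.
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))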
| 42
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' ,['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' ,['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' ,[
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] ,)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' ,[
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] ,)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
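# Aside (added; not part of the test module): how these helpers look from user
# code -- listing a dataset's configs and splits without downloading the data.
if __name__ == "__main__":
    print(get_dataset_config_names("paws"))  # e.g. ['labeled_final', 'labeled_swap', 'unlabeled_final']
    print(get_dataset_split_names("paws", "labeled_final"))  # ['train', 'test', 'validation']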
| 558
| 0
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip('Swin does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
@unittest.skip('Swin does not support feedforward chunking' )
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
    def test_retain_grad_hidden_states_attentions(self):  # method name reconstructed; obfuscated in the source
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
    def test_model_from_pretrained(self):
        pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_from_base(self):
        pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
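# Aside (added; not part of the test file): the backbone contract the tests above
# verify, in user-facing form. The config values are small illustrative choices,
# not from the original file.
if __name__ == "__main__":
    import torch

    from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

    cfg = MaskFormerSwinConfig(image_size=64, out_features=["stage1", "stage2", "stage3"])
    backbone = MaskFormerSwinBackbone(cfg).eval()
    with torch.no_grad():
        feats = backbone(torch.randn(1, 3, 64, 64)).feature_maps
    print([tuple(f.shape) for f in feats])  # one (batch, channels, height, width) map per requested stage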
| 509
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt"
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):  # dtype default reconstructed; obfuscated in the source
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()  # note: sliced from rgb, as in the source

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):  # dtype default reconstructed; obfuscated in the source
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
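# Aside (added; not part of the tests): what the LDM3D pipeline returns to a user --
# an RGB image and an aligned depth map from one prompt. Checkpoint id as above;
# default output type is PIL.
if __name__ == "__main__":
    pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c")
    out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=25)
    rgb, depth = out.rgb[0], out.depth[0]
    rgb.save("astronaut_rgb.jpg")
    depth.save("astronaut_depth.png")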
| 509
| 1
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
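# Aside (added; not part of the tests): RoFormer's distinguishing feature is the
# rotary position embedding. A minimal sketch of the rotation it applies to one
# pair of feature dimensions at position m with frequency theta (names are
# illustrative, not from the model code):
def rotate_pair(x0, x1, m, theta):
    import math

    # rotate the 2-D feature pair by the position-dependent angle m * theta
    cos, sin = math.cos(m * theta), math.sin(m * theta)
    return x0 * cos - x1 * sin, x0 * sin + x1 * cos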
| 455
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
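    # Aside (added): typical invocations of the subcommand registered above:
    #   transformers-cli add-new-model
    #   transformers-cli add-new-model --testing --testing_file <config.json>
    # (`<config.json>` is a placeholder; see the testing branch of `run` below)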
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# Create temp file
UpperCAmelCase_ , UpperCAmelCase_ = mkstemp()
UpperCAmelCase_ = False
with fdopen(lowerCAmelCase , "w" ) as new_file:
with open(lowerCAmelCase ) as old_file:
for line in old_file:
new_file.write(lowerCAmelCase )
if line_to_copy_below in line:
UpperCAmelCase_ = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCAmelCase )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(lowerCAmelCase , lowerCAmelCase )
# Remove original file
remove(lowerCAmelCase )
# Move new file
move(lowerCAmelCase , lowerCAmelCase )
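# Note on the replace() helper above: it follows a common safe in-place edit pattern.
# The modified content is written to a mkstemp() temp file, the original file's
# permissions are copied over with copymode(), the original is removed, and the temp
# file is moved into place, so a crash mid-write never leaves a half-written file at
# the original path.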
def skip_units(lowerCAmelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCAmelCase ):
with open(lowerCAmelCase ) as datafile:
UpperCAmelCase_ = []
UpperCAmelCase_ = False
UpperCAmelCase_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(lowerCAmelCase )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(lowerCAmelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase_ = []
elif "##" not in line:
lines_to_copy.append(lowerCAmelCase )
remove(lowerCAmelCase )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(lowerCAmelCase )
| 579
| 0
|
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( voltage: float ,current: float ,resistance: float ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
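# Usage sketch: exactly one of the three arguments must be 0, and the function
# solves Ohm's law, V = I * R, for that unknown. For example:
# _UpperCamelCase(voltage=10, current=0, resistance=5) -> {"current": 2.0}
# _UpperCamelCase(voltage=0, current=2, resistance=5) -> {"voltage": 10.0}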
| 384
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A_ = "src/transformers"
A_ = "docs/source/en/tasks"
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
with open(__UpperCamelCase ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
lowerCamelCase_ = f.readlines()
# Find the start prompt.
lowerCamelCase_ = 0
while not lines[start_index].startswith(__UpperCamelCase ):
start_index += 1
start_index += 1
lowerCamelCase_ = start_index
while not lines[end_index].startswith(__UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
A_ = direct_transformers_import(TRANSFORMERS_PATH)
A_ = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A_ = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = TASK_GUIDE_TO_MODELS[task_guide]
lowerCamelCase_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() )
lowerCamelCase_ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> int:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = _find_text_in_file(
filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' ,end_prompt='<!--End of the generated tip-->' ,)
lowerCamelCase_ = get_model_list_for_task(__UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
' to fix this.' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
A_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 384
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Tuple = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : List[Any] = "sew-d"
def __init__( self : Dict , A : int=32 , A : Optional[int]=7_68 , A : List[str]=12 , A : int=12 , A : int=30_72 , A : Optional[int]=2 , A : Union[str, Any]=5_12 , A : List[str]=2_56 , A : Union[str, Any]=True , A : Optional[Any]=True , A : Optional[Any]=("p2c", "c2p") , A : List[str]="layer_norm" , A : str="gelu_python" , A : List[str]=0.1 , A : Union[str, Any]=0.1 , A : List[Any]=0.1 , A : List[str]=0.0 , A : Dict=0.1 , A : Tuple=0.02 , A : Any=1e-7 , A : Any=1e-5 , A : Optional[Any]="group" , A : int="gelu" , A : Optional[int]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , A : List[str]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A : Dict=False , A : Any=1_28 , A : Optional[int]=16 , A : int=True , A : Any=0.05 , A : List[str]=10 , A : Union[str, Any]=2 , A : Dict=0.0 , A : Union[str, Any]=10 , A : Union[str, Any]=0 , A : Tuple="mean" , A : Optional[Any]=False , A : Any=False , A : List[str]=2_56 , A : Optional[Any]=0 , A : List[str]=1 , A : int=2 , **A : Union[str, Any] , ) -> str:
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
lowercase_ : Tuple = hidden_size
lowercase_ : Any = feat_extract_norm
lowercase_ : Optional[int] = feat_extract_activation
lowercase_ : Optional[int] = list(A )
lowercase_ : Optional[int] = list(A )
lowercase_ : int = list(A )
lowercase_ : Union[str, Any] = conv_bias
lowercase_ : Union[str, Any] = num_conv_pos_embeddings
lowercase_ : List[str] = num_conv_pos_embedding_groups
lowercase_ : List[Any] = len(self.conv_dim )
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Tuple = intermediate_size
lowercase_ : str = squeeze_factor
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : Dict = position_buckets
lowercase_ : str = share_att_key
lowercase_ : List[Any] = relative_attention
lowercase_ : Tuple = norm_rel_ebd
lowercase_ : Union[str, Any] = list(A )
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = num_attention_heads
lowercase_ : Dict = hidden_dropout
lowercase_ : Union[str, Any] = attention_dropout
lowercase_ : str = activation_dropout
lowercase_ : Dict = feat_proj_dropout
lowercase_ : int = final_dropout
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : Dict = feature_layer_norm_eps
lowercase_ : Any = initializer_range
lowercase_ : List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase_ : Tuple = apply_spec_augment
lowercase_ : Tuple = mask_time_prob
lowercase_ : int = mask_time_length
lowercase_ : List[Any] = mask_time_min_masks
lowercase_ : str = mask_feature_prob
lowercase_ : List[str] = mask_feature_length
lowercase_ : Optional[Any] = mask_feature_min_masks
# ctc loss
lowercase_ : List[str] = ctc_loss_reduction
lowercase_ : Optional[Any] = ctc_zero_infinity
# sequence classification
lowercase_ : List[str] = use_weighted_layer_sum
lowercase_ : Union[str, Any] = classifier_proj_size
@property
def A ( self : List[str] ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
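# The property above returns functools.reduce(operator.mul, self.conv_stride, 1),
# i.e. the product of all convolutional strides -- the overall downsampling factor
# between raw audio samples and model logits. With the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this evaluates to 320, meaning one logit
# per 320 input samples (20 ms of audio at 16 kHz).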
| 231
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : str = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Tuple = "timesformer"
def __init__( self : Optional[Any] , A : Tuple=2_24 , A : Optional[int]=16 , A : Any=3 , A : str=8 , A : Optional[Any]=7_68 , A : Dict=12 , A : Optional[int]=12 , A : Optional[Any]=30_72 , A : Optional[Any]="gelu" , A : Union[str, Any]=0.0 , A : Dict=0.0 , A : str=0.02 , A : Union[str, Any]=1e-6 , A : Union[str, Any]=True , A : Dict="divided_space_time" , A : Optional[Any]=0 , **A : List[str] , ) -> Tuple:
super().__init__(**A )
lowercase_ : Tuple = image_size
lowercase_ : str = patch_size
lowercase_ : Tuple = num_channels
lowercase_ : Optional[Any] = num_frames
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : str = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : List[Any] = initializer_range
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : List[str] = qkv_bias
lowercase_ : Any = attention_type
lowercase_ : Dict = drop_path_rate
| 231
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ : List[str] = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : int = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
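# Rough sketch of the lazy-import mechanics (an assumption about _LazyModule, not a
# verbatim excerpt): the module object substituted into sys.modules defers the heavy
# "from .modeling_luke import ..." work until an attribute is first accessed, e.g.:
# import transformers.models.luke as luke   # cheap: nothing heavy imported yet
# luke.LukeModel                            # triggers the real import on first access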
| 673
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase_ : List[str] = False
class UpperCamelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self , snake_case__=32 ) -> Optional[Any]:
"""simple docstring"""
set_seed(0 )
model = UNetaDModel(sample_size=snake_case__ , in_channels=3 , out_channels=3 )
optimizer = torch.optim.SGD(model.parameters() , lr=0.0_001 )
return model, optimizer
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=snake_case__ , )
UpperCAmelCase = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=snake_case__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
UpperCAmelCase = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(snake_case__ ) for _ in range(4 )]
UpperCAmelCase = [torch.randn((4, 3, 32, 32) ).to(snake_case__ ) for _ in range(4 )]
UpperCAmelCase = [torch.randint(0 , 10_00 , (4,) ).long().to(snake_case__ ) for _ in range(4 )]
# train with a DDPM scheduler
UpperCAmelCase , UpperCAmelCase = self.get_model_optimizer(resolution=32 )
model.train().to(snake_case__ )
for i in range(4 ):
optimizer.zero_grad()
UpperCAmelCase = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
UpperCAmelCase = model(snake_case__ , timesteps[i] ).sample
UpperCAmelCase = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
UpperCAmelCase , UpperCAmelCase = self.get_model_optimizer(resolution=32 )
model.train().to(snake_case__ )
for i in range(4 ):
optimizer.zero_grad()
UpperCAmelCase = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
UpperCAmelCase = model(snake_case__ , timesteps[i] ).sample
UpperCAmelCase = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) )
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) )
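# Why these comparisons are expected to hold (a note, not part of the original test):
# with identical beta schedules, DDPMScheduler.add_noise and DDIMScheduler.add_noise
# apply the same closed-form forward process q(x_t | x_0), and get_model_optimizer
# re-seeds via set_seed(0), so both training runs see identical models and batches.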
| 673
| 1
|
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any]="attention" ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
SCREAMING_SNAKE_CASE__ :List[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
SCREAMING_SNAKE_CASE__ :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
SCREAMING_SNAKE_CASE__ :Union[str, Any] = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False ) -> Dict:
'''simple docstring'''
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
SCREAMING_SNAKE_CASE__ :Dict = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
SCREAMING_SNAKE_CASE__ :Any = (wi_a, wi_a)
else:
SCREAMING_SNAKE_CASE__ :Tuple = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
SCREAMING_SNAKE_CASE__ :List[Any] = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any ) -> Tuple:
'''simple docstring'''
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def lowerCamelCase ( UpperCAmelCase__ : dict , *, UpperCAmelCase__ : int , UpperCAmelCase__ : bool ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = traverse_util.flatten_dict(variables['target'] )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = {'/'.join(UpperCAmelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
SCREAMING_SNAKE_CASE__ :Any = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Tuple = collections.OrderedDict()
# Shared embeddings.
SCREAMING_SNAKE_CASE__ :int = old['token_embedder/embedding']
# Encoder.
for i in range(UpperCAmelCase__ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE__ :Optional[int] = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'encoder' , 'pre_attention_layer_norm' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Dict = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'encoder' , 'attention' )
SCREAMING_SNAKE_CASE__ :str = layer_norm
SCREAMING_SNAKE_CASE__ :Optional[int] = k.T
SCREAMING_SNAKE_CASE__ :str = o.T
SCREAMING_SNAKE_CASE__ :Optional[Any] = q.T
SCREAMING_SNAKE_CASE__ :Dict = v.T
# Block i, layer 1 (MLP).
SCREAMING_SNAKE_CASE__ :Dict = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'encoder' , 'pre_mlp_layer_norm' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Tuple = tax_mlp_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'encoder' , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Dict = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ :Dict = wi[0].T
SCREAMING_SNAKE_CASE__ :Dict = wi[1].T
else:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = wi.T
SCREAMING_SNAKE_CASE__ :int = wo.T
SCREAMING_SNAKE_CASE__ :Dict = old[
'encoder/relpos_bias/rel_embedding'
].T
SCREAMING_SNAKE_CASE__ :List[Any] = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(UpperCAmelCase__ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE__ :Dict = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'decoder' , 'pre_self_attention_layer_norm' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Any = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'decoder' , 'self_attention' )
SCREAMING_SNAKE_CASE__ :Dict = layer_norm
SCREAMING_SNAKE_CASE__ :List[Any] = k.T
SCREAMING_SNAKE_CASE__ :Tuple = o.T
SCREAMING_SNAKE_CASE__ :Union[str, Any] = q.T
SCREAMING_SNAKE_CASE__ :Optional[Any] = v.T
# Block i, layer 1 (Cross Attention).
SCREAMING_SNAKE_CASE__ :Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'decoder' , 'pre_cross_attention_layer_norm' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'decoder' , 'encoder_decoder_attention' )
SCREAMING_SNAKE_CASE__ :Optional[Any] = layer_norm
SCREAMING_SNAKE_CASE__ :Union[str, Any] = k.T
SCREAMING_SNAKE_CASE__ :Dict = o.T
SCREAMING_SNAKE_CASE__ :Any = q.T
SCREAMING_SNAKE_CASE__ :Union[str, Any] = v.T
# Block i, layer 2 (MLP).
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'decoder' , 'pre_mlp_layer_norm' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Union[str, Any] = tax_mlp_lookup(UpperCAmelCase__ , UpperCAmelCase__ , 'decoder' , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[str] = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = wi[0].T
SCREAMING_SNAKE_CASE__ :Optional[int] = wi[1].T
else:
SCREAMING_SNAKE_CASE__ :List[Any] = wi.T
SCREAMING_SNAKE_CASE__ :Any = wo.T
SCREAMING_SNAKE_CASE__ :int = old['decoder/decoder_norm/scale']
SCREAMING_SNAKE_CASE__ :str = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
SCREAMING_SNAKE_CASE__ :str = old['decoder/logits_dense/kernel'].T
return new
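# A note on the .T transposes above (an assumption based on the usual T5X/Flax layout,
# not stated in this file): Flax dense kernels are stored as (in_features,
# out_features), while torch.nn.Linear weights are (out_features, in_features), so
# every kernel must be transposed when copied into the PyTorch state dict.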
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
SCREAMING_SNAKE_CASE__ :List[Any] = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
SCREAMING_SNAKE_CASE__ :Optional[Any] = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
SCREAMING_SNAKE_CASE__ :int = state_dict['shared.weight']
return state_dict
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[str] = convert_tax_to_pytorch(UpperCAmelCase__ , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :str = make_state_dict(UpperCAmelCase__ , UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = False ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = TaConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
SCREAMING_SNAKE_CASE__ :Optional[int] = TaEncoderModel(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE__ :int = TaForConditionalGeneration(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCAmelCase__ )
print('Done' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Set this flag if the model is an encoder-only model.''', default=False
)
UpperCamelCase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 320
|
'''simple docstring'''
UpperCamelCase_ = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
UpperCamelCase_ = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort ( start , visited , sort ) -> list[str]:
'''simple docstring'''
current = start
# add current to visited
visited.append(current )
neighbors = edges[current]
for neighbor in neighbors:
# if neighbor has not been visited yet, visit it first
if neighbor not in visited:
sort = topological_sort(neighbor , visited , sort )
# once every neighbor is visited, add current to the sort
sort.append(current )
# if not every vertex has been visited, select a new one to visit
if len(visited ) != len(vertices ):
for vertice in vertices:
if vertice not in visited:
sort = topological_sort(vertice , visited , sort )
# return sort
return sort
if __name__ == "__main__":
sort = topological_sort('''a''' , [] , [] )
print(sort)
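# For the sample graph above this prints ['c', 'd', 'e', 'b', 'a']: each vertex is
# appended only after all of its neighbors, so the list is a reverse topological
# order (dependencies first); reverse it to get a conventional topological order.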
| 320
| 1
|
from itertools import count
def a__ ( min_block_length = 50 ):
'''simple docstring'''
fill_count_functions = [1] * min_block_length
for n in count(min_block_length ):
fill_count_functions.append(1 )
for block_length in range(min_block_length, n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
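# This appears to implement the Project Euler 115 fill-count F(min_block_length, n):
# each row counts 1 for the all-empty row (the initial append of 1), and for every
# block_length >= min_block_length, fill_count_functions[n - start - block_length - 1]
# fillings of the suffix beyond a one-square gap, plus 1 for the block placed flush
# against the right end; the loop returns the first n where the count exceeds 10^6.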
| 529
|
import os
import sys
import unittest
lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= get_test_to_tester_mapping(lowerCAmelCase )
__lowercase= get_test_to_tester_mapping(lowerCAmelCase )
__lowercase= {'BertModelTest': 'BertModelTester'}
__lowercase= {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
def _A (self ):
__lowercase= get_model_to_test_mapping(lowerCAmelCase )
__lowercase= get_model_to_test_mapping(lowerCAmelCase )
__lowercase= {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
__lowercase= {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
def _A (self ):
__lowercase= get_model_to_tester_mapping(lowerCAmelCase )
__lowercase= get_model_to_tester_mapping(lowerCAmelCase )
__lowercase= {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
__lowercase= {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
| 230
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCAmelCase : Union[str, Any] = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCAmelCase : int = BASE_URL + "/user"
# https://github.com/settings/tokens
__lowerCAmelCase : Tuple = os.environ.get("USER_TOKEN", "")
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {
"""Authorization""": f"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowerCamelCase__ , headers=lowerCamelCase__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 674
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674
| 1
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase =get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__UpperCAmelCase =5_0_0_0_3
__UpperCAmelCase =5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class a__ ( _UpperCamelCase , unittest.TestCase ):
lowerCamelCase : Any =PLBartTokenizer
lowerCamelCase : Optional[Any] =None
lowerCamelCase : str =False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = PLBartTokenizer(_UpperCAmelCase , language_codes='''base''' , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = PLBartTokenizer(_UpperCAmelCase , language_codes='''base''' , keep_accents=_UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 4 , _UpperCAmelCase )]
self.assertListEqual(_UpperCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__lowerCamelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__lowerCamelCase = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = PLBartTokenizer(_UpperCAmelCase , language_codes='''multi''' , keep_accents=_UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 7 , _UpperCAmelCase )]
self.assertListEqual(
_UpperCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__lowerCamelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__lowerCamelCase = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
lowerCamelCase : Optional[Any] ="uclanlp/plbart-python-en_XX"
lowerCamelCase : Dict =[
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
lowerCamelCase : List[str] =[
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
lowerCamelCase : Tuple =[
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict ):
"""simple docstring"""
__lowerCamelCase = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
__lowerCamelCase = 1
return cls
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_00_03 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
__lowerCamelCase = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
__lowerCamelCase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
__lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
__lowerCamelCase = 10
__lowerCamelCase = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_00_04, 5_00_01] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCamelCase = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors='''pt''' )
__lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors='''pt''' )
__lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors='''pt''' )
__lowerCamelCase = targets['''input_ids''']
__lowerCamelCase = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_50, 2_42, 2, 5_00_03]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_00_01,
} , )
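# --- Hedged illustration (not part of the original test file) ---
# A minimal pure-Python sketch of the right-shift convention the batch tests
# above rely on, assuming the fairseq/PLBart layout where the language code
# sits at the end of the labels: the last non-pad token is rotated to position
# 0 so it becomes the first decoder input, and everything else shifts right.
def shift_tokens_right_sketch(labels, pad_token_id):
    shifted = []
    for row in labels:
        # index of the last non-pad token (assumed to be the language code)
        last = max(i for i, tok in enumerate(row) if tok != pad_token_id)
        shifted.append([row[last]] + row[:last] + row[last + 1 :])
    return shifted

# e.g. with pad_token_id=1: [[9, 8, 2, 50001, 1]] -> [[50001, 9, 8, 2, 1]]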
| 546
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
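# --- Hedged illustration (not part of the original test file) ---
# A minimal sketch, under the assumption exercised in the config test above, of
# how a legacy size value is normalized into the {"height", "width"} dict the
# image processor expects; older configs stored (width, height), so a tuple has
# to be flipped when converting.
def normalize_size_sketch(size):
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (tuple, list)):  # legacy (width, height) order
        return {"height": size[1], "width": size[0]}
    return size  # already a dict

assert normalize_size_sketch((42, 84)) == {"height": 84, "width": 42}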
| 52
| 0
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
_A : Union[str, Any] = GPTSwaTokenizer
_A : Optional[int] = False
_A : Optional[int] = True
_A : List[Any] = False
def __lowerCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase = GPTSwaTokenizer(__A , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = 'This is a test'
__UpperCAmelCase = 'This is a test'
return input_text, output_text
def __lowerCamelCase ( self ):
__UpperCAmelCase = '<s>'
__UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(__A ) , 2_000 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )
def __lowerCamelCase ( self ):
__UpperCAmelCase = GPTSwaTokenizer(__A )
__UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(__A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [465, 287, 265, 631, 842] )
__UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
__A , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(__A )
# fmt: off
self.assertListEqual(
__A , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def __lowerCamelCase ( self ):
__UpperCAmelCase = GPTSwaTokenizer(__A )
__UpperCAmelCase = ['This is a test', 'I was born in 92000, and this is falsé.']
__UpperCAmelCase = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__A , __A ):
self.assertListEqual(tokenizer.encode_fast(__A ) , __A )
# Test that decode_fast returns the input text
for text, token_ids in zip(__A , __A ):
self.assertEqual(tokenizer.decode_fast(__A ) , __A )
@slow
def __lowerCamelCase ( self ):
__UpperCAmelCase = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
__UpperCAmelCase = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name='AI-Sweden/gpt-sw3-126m' , sequences=__A , )
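# --- Hedged illustration (not part of the original test file) ---
# A minimal sketch of the byte-fallback behaviour asserted above: when a piece
# such as "é" is missing from the SentencePiece vocabulary, the tokenizer falls
# back to its UTF-8 bytes, emitting one "<0xNN>" piece per byte.
def byte_fallback_sketch(char):
    return ["<0x%02X>" % b for b in char.encode("utf-8")]

assert byte_fallback_sketch("é") == ["<0xC3>", "<0xA9>"]
assert byte_fallback_sketch("9") == ["<0x39>"]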
| 716
|
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A: str = logging.get_logger(__name__)
_A: Optional[Any] = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class UpperCAmelCase ( UpperCAmelCase_ ):
_A : Tuple = """autoformer"""
_A : str = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , __A = None , __A = None , __A = "student_t" , __A = "nll" , __A = 1 , __A = [1, 2, 3, 4, 5, 6, 7] , __A = True , __A = 0 , __A = 0 , __A = 0 , __A = 0 , __A = None , __A = None , __A = 64 , __A = 2 , __A = 2 , __A = 2 , __A = 2 , __A = 32 , __A = 32 , __A = "gelu" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 100 , __A = 0.0_2 , __A = True , __A=True , __A = 10 , __A = 25 , __A = 3 , **__A , ):
# time series specific configuration
__UpperCAmelCase = prediction_length
__UpperCAmelCase = context_length if context_length is not None else prediction_length
__UpperCAmelCase = distribution_output
__UpperCAmelCase = loss
__UpperCAmelCase = input_size
__UpperCAmelCase = num_time_features
__UpperCAmelCase = lags_sequence
__UpperCAmelCase = scaling
__UpperCAmelCase = num_dynamic_real_features
__UpperCAmelCase = num_static_real_features
__UpperCAmelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__A ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__UpperCAmelCase = cardinality
else:
__UpperCAmelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__A ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__UpperCAmelCase = embedding_dimension
else:
__UpperCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
__UpperCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = use_cache
# Autoformer
__UpperCAmelCase = label_length
__UpperCAmelCase = moving_average
__UpperCAmelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCamelCase ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
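# --- Hedged worked example (not part of the original config file) ---
# The encoder input width computed in __init__ above is
# feature_size = input_size * len(lags_sequence) + _number_of_features, where
# _number_of_features adds the embeddings, real/time features and the two
# scaling statistics (log1p(abs(loc)) and log(scale)) per input dimension.
input_size, lags = 1, [1, 2, 3, 4, 5, 6, 7]
embedding_dims, dyn_real, time_feats, static_real = [], 0, 2, 0
number_of_features = sum(embedding_dims) + dyn_real + time_feats + static_real + input_size * 2
feature_size = input_size * len(lags) + number_of_features
assert feature_size == 7 + 4  # 7 lagged copies of the series plus 4 extra features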
| 617
| 0
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, *A, **A ):
'''simple docstring'''
super().__init__(*A, **A )
requires_backends(self, 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def UpperCamelCase_ ( self, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {}
if top_k is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = top_k
return {}, {}, postprocess_params
def __call__( self, A, **A ):
'''simple docstring'''
return super().__call__(A, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = load_image(A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(images=A, return_tensors=self.framework )
return model_inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model(**A )
return model_outputs
def UpperCamelCase_ ( self, A, A=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE : int = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE : str = model_outputs.logits.softmax(-1 )[0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = probs.topk(A )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE : Optional[Any] = stable_softmax(model_outputs.logits, axis=-1 )[0]
SCREAMING_SNAKE_CASE : int = tf.math.top_k(A, k=A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
SCREAMING_SNAKE_CASE : Optional[int] = scores.tolist()
SCREAMING_SNAKE_CASE : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A, A )]
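# --- Hedged illustration (not part of the original pipeline file) ---
# A minimal pure-Python sketch of the postprocessing above: softmax the logits,
# keep the top_k scores and map the ids through the label mapping.
import math

def postprocess_sketch(logits, id2label, top_k=5):
    exps = [math.exp(l - max(logits)) for l in logits]
    probs = [e / sum(exps) for e in exps]
    ranked = sorted(enumerate(probs), key=lambda p: p[1], reverse=True)[:top_k]
    return [{"score": score, "label": id2label[i]} for i, score in ranked]

print(postprocess_sketch([2.0, 0.5, 1.0], {0: "cat", 1: "dog", 2: "bird"}, top_k=2))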
| 28
|
import os
import sys
import unittest
__a: Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a: Tuple = os.path.join(git_repo_path, '''src''', '''transformers''')
__a: Optional[Any] = '''
{0} = None
'''
__a: List[Any] = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__a: int = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(lowerCamelCase )
_UpperCAmelCase = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(lowerCamelCase , """tokenizers""" )
_UpperCAmelCase = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(lowerCamelCase , """tensorflow_text""" )
_UpperCAmelCase = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(lowerCamelCase , """sentencepiece_and_tokenizers""" )
_UpperCAmelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(lowerCamelCase , """sentencepiece_and_tensorflow_text""" )
_UpperCAmelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(lowerCamelCase , """sentencepiece_and_tokenizers_and_vision""" )
def lowerCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""" , lowerCamelCase )
self.assertIn("""tensorflow_text""" , lowerCamelCase )
self.assertIn("""sentencepiece_and_tokenizers""" , lowerCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def lowerCamelCase ( self : int ) -> Any:
"""simple docstring"""
_UpperCAmelCase = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(lowerCamelCase , """\nCONSTANT = None\n""" )
_UpperCAmelCase = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
lowerCamelCase , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_UpperCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
_UpperCAmelCase = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def lowerCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
_UpperCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , lowerCamelCase )
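# --- Hedged illustration (not part of the original test file) ---
# A minimal sketch of the mechanism the generated dummies rely on: instantiating
# a dummy class (or calling a dummy function) raises through requires_backends
# when the named backend is unavailable, instead of failing at import time.
def requires_backends_sketch(obj, backends):
    name = obj if isinstance(obj, str) else obj.__class__.__name__
    raise ImportError(f"{name} requires the {backends} backend(s) to be installed.")

class FakeClassSketch:
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, self._backends)

try:
    FakeClassSketch()
except ImportError as err:
    print(err)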
| 108
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = 3
lowerCAmelCase = (32, 32)
lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase_ )
return image
@property
def __snake_case ( self ):
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowercase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
return CLIPTextModel(lowercase_ )
def __snake_case ( self ):
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.dummy_cond_unet_upscale
lowerCAmelCase = DDPMScheduler()
lowerCAmelCase = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCAmelCase = self.dummy_vae
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(lowercase_ ) ).convert('''RGB''' ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components
lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=3_50 , )
lowerCAmelCase = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase = '''A painting of a squirrel eating a burger'''
lowerCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=lowercase_ , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
lowerCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCAmelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self ):
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.dummy_cond_unet_upscale
lowerCAmelCase = DDPMScheduler()
lowerCAmelCase = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCAmelCase = self.dummy_vae
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(lowercase_ ) ).convert('''RGB''' ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components
lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=3_50 , )
lowerCAmelCase = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase = '''A painting of a squirrel eating a burger'''
lowerCAmelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase = output.images
assert image.shape[0] == 2
lowerCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __snake_case ( self ):
lowerCAmelCase = self.dummy_cond_unet_upscale
lowerCAmelCase = DDPMScheduler()
lowerCAmelCase = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCAmelCase = self.dummy_vae
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(lowercase_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCAmelCase = unet.half()
lowerCAmelCase = text_encoder.half()
# assemble the upscale pipeline from the dummy components
lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=3_50 , )
lowerCAmelCase = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase = '''A painting of a squirrel eating a burger'''
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='''np''' , ).images
lowerCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
lowerCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase = '''a cat sitting on a park bench'''
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
lowerCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase = '''a cat sitting on a park bench'''
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = '''a cat sitting on a park bench'''
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , num_inference_steps=5 , output_type='''np''' , )
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
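# --- Hedged illustration (not part of the original test file) ---
# The x4-upscaler invariant the shape assertions above rely on: the output
# image is four times the low-resolution input along each spatial dimension.
low_res_hw = (128, 128)
expected_hw = tuple(4 * s for s in low_res_hw)
assert expected_hw == (512, 512)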
| 715
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert"""
UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
# The file should have been downloaded into the cache directory here
self.assertTrue(os.path.isdir(UpperCAmelCase_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertTrue(os.path.isfile(UpperCAmelCase_ ) )
# File is cached at the same place the second time.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Using a specific revision to test the full commit hash.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' )
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ )
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' )
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = mock.Mock()
lowerCAmelCase = 5_00
lowerCAmelCase = {}
lowerCAmelCase = HTTPError
lowerCAmelCase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head:
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
# This checks that we did call the fake head request
mock_head.assert_called()
def __snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
def __snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' )
lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ )
# The returned name is the cached path, which is not easy to test directly, so instead we load the content.
lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_68 )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
| 33
| 0
|
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor']
lowerCAmelCase__ = 'SamImageProcessor'
def __init__( self: Tuple ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
super().__init__(__lowerCAmelCase )
_lowerCamelCase : int = self.image_processor
_lowerCamelCase : str = -10
_lowerCamelCase : Optional[Any] = self.image_processor.size["longest_edge"]
def __call__( self: Dict ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Union[str, TensorType]] = None ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.image_processor(
__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ,)
# pop arguments that are not used in the forward pass but are needed nevertheless
_lowerCamelCase : Union[str, Any] = encoding_image_processor["original_sizes"]
if hasattr(__lowerCAmelCase ,"numpy" ): # Checks if Torch or TF tensor
_lowerCamelCase : Dict = original_sizes.numpy()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._check_and_preprocess_points(
input_points=__lowerCAmelCase ,input_labels=__lowerCAmelCase ,input_boxes=__lowerCAmelCase ,)
_lowerCamelCase : Optional[Any] = self._normalize_and_convert(
__lowerCAmelCase ,__lowerCAmelCase ,input_points=__lowerCAmelCase ,input_labels=__lowerCAmelCase ,input_boxes=__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,)
return encoding_image_processor
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Union[str, Any]="pt" ,):
'''simple docstring'''
if input_points is not None:
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = [
self._normalize_coordinates(self.target_size ,__lowerCAmelCase ,original_sizes[0] ) for point in input_points
]
else:
_lowerCamelCase : Optional[int] = [
self._normalize_coordinates(self.target_size ,__lowerCAmelCase ,__lowerCAmelCase )
for point, original_size in zip(__lowerCAmelCase ,__lowerCAmelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_lowerCamelCase, _lowerCamelCase : int = self._pad_points_and_labels(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[str] = np.array(__lowerCAmelCase )
if input_labels is not None:
_lowerCamelCase : Union[str, Any] = np.array(__lowerCAmelCase )
if input_boxes is not None:
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = [
self._normalize_coordinates(self.target_size ,__lowerCAmelCase ,original_sizes[0] ,is_bounding_box=__lowerCAmelCase )
for box in input_boxes
]
else:
_lowerCamelCase : str = [
self._normalize_coordinates(self.target_size ,__lowerCAmelCase ,__lowerCAmelCase ,is_bounding_box=__lowerCAmelCase )
for box, original_size in zip(__lowerCAmelCase ,__lowerCAmelCase )
]
_lowerCamelCase : List[str] = np.array(__lowerCAmelCase )
if input_boxes is not None:
if return_tensors == "pt":
_lowerCamelCase : str = torch.from_numpy(__lowerCAmelCase )
# boxes batch size of 1 by default
_lowerCamelCase : Dict = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_lowerCamelCase : Tuple = tf.convert_to_tensor(__lowerCAmelCase )
# boxes batch size of 1 by default
_lowerCamelCase : str = tf.expand_dims(__lowerCAmelCase ,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_lowerCamelCase : Dict = torch.from_numpy(__lowerCAmelCase )
# point batch size of 1 by default
_lowerCamelCase : Any = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_lowerCamelCase : Tuple = tf.convert_to_tensor(__lowerCAmelCase )
# point batch size of 1 by default
_lowerCamelCase : Union[str, Any] = tf.expand_dims(__lowerCAmelCase ,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_lowerCamelCase : Tuple = torch.from_numpy(__lowerCAmelCase )
# point batch size of 1 by default
_lowerCamelCase : List[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_lowerCamelCase : Optional[Any] = tf.convert_to_tensor(__lowerCAmelCase )
# point batch size of 1 by default
_lowerCamelCase : List[str] = tf.expand_dims(__lowerCAmelCase ,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : str = max([point.shape[0] for point in input_points] )
_lowerCamelCase : int = []
for i, point in enumerate(__lowerCAmelCase ):
if point.shape[0] != expected_nb_points:
_lowerCamelCase : str = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] ,axis=0 )
_lowerCamelCase : Dict = np.append(input_labels[i] ,[self.point_pad_value] )
processed_input_points.append(__lowerCAmelCase )
_lowerCamelCase : str = processed_input_points
return input_points, input_labels
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[str]=False ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : int = original_size
_lowerCamelCase, _lowerCamelCase : Dict = self.image_processor._get_preprocess_shape(__lowerCAmelCase ,longest_edge=__lowerCAmelCase )
_lowerCamelCase : Dict = deepcopy(__lowerCAmelCase ).astype(__lowerCAmelCase )
if is_bounding_box:
_lowerCamelCase : int = coords.reshape(-1 ,2 ,2 )
_lowerCamelCase : str = coords[..., 0] * (new_w / old_w)
_lowerCamelCase : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_lowerCamelCase : Any = coords.reshape(-1 ,4 )
return coords
def _lowercase ( self: int ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: str=None ,):
'''simple docstring'''
if input_points is not None:
if hasattr(__lowerCAmelCase ,"numpy" ): # Checks for TF or Torch tensor
_lowerCamelCase : Any = input_points.numpy().tolist()
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or not isinstance(input_points[0] ,__lowerCAmelCase ):
raise ValueError("Input points must be a list of list of floating points." )
_lowerCamelCase : Union[str, Any] = [np.array(__lowerCAmelCase ) for input_point in input_points]
else:
_lowerCamelCase : Dict = None
if input_labels is not None:
if hasattr(__lowerCAmelCase ,"numpy" ):
_lowerCamelCase : Any = input_labels.numpy().tolist()
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or not isinstance(input_labels[0] ,__lowerCAmelCase ):
raise ValueError("Input labels must be a list of list integers." )
_lowerCamelCase : Any = [np.array(__lowerCAmelCase ) for label in input_labels]
else:
_lowerCamelCase : Tuple = None
if input_boxes is not None:
if hasattr(__lowerCAmelCase ,"numpy" ):
_lowerCamelCase : str = input_boxes.numpy().tolist()
if (
not isinstance(__lowerCAmelCase ,__lowerCAmelCase )
or not isinstance(input_boxes[0] ,__lowerCAmelCase )
or not isinstance(input_boxes[0][0] ,__lowerCAmelCase )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_lowerCamelCase : Any = [np.array(__lowerCAmelCase ).astype(np.floataa ) for box in input_boxes]
else:
_lowerCamelCase : List[Any] = None
return input_points, input_labels, input_boxes
@property
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(__lowerCAmelCase ) )
def _lowercase ( self: List[str] ,*__lowerCAmelCase: int ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__lowerCAmelCase ,**__lowerCAmelCase )
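# --- Hedged illustration (not part of the original processor file) ---
# The rescaling performed by _normalize_coordinates above, in plain Python:
# point coordinates are mapped from the original image frame into the resized
# frame by scaling x with new_w/old_w and y with new_h/old_h.
def normalize_point_sketch(point, original_size, new_size):
    (old_h, old_w), (new_h, new_w) = original_size, new_size
    x, y = point
    return (x * new_w / old_w, y * new_h / old_h)

assert normalize_point_sketch((200, 100), (400, 800), (512, 1024)) == (256.0, 128.0)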
| 46
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case_ : Optional[Any] = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
snake_case_ : Tuple = {
'''squeezebert/squeezebert-uncased''': 512,
'''squeezebert/squeezebert-mnli''': 512,
'''squeezebert/squeezebert-mnli-headless''': 512,
}
snake_case_ : Tuple = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = SqueezeBertTokenizer
def __init__( self , A_=None , A_=None , A_=True , A_="[UNK]" , A_="[SEP]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_=True , A_=None , **A_ , ):
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , **A_ , )
_UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , A_ ) != do_lower_case
or normalizer_state.get("strip_accents" , A_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , A_ ) != tokenize_chinese_chars
):
_UpperCamelCase = getattr(A_ , normalizer_state.pop("type" ) )
_UpperCamelCase = do_lower_case
_UpperCamelCase = strip_accents
_UpperCamelCase = tokenize_chinese_chars
_UpperCamelCase = normalizer_class(**A_ )
_UpperCamelCase = do_lower_case
def a ( self , A_ , A_=None ):
_UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self , A_ , A_ = None ):
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self , A_ , A_ = None ):
_UpperCamelCase = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
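# --- Hedged illustration (not part of the original tokenizer file) ---
# What the token-type-id method above computes for a sentence pair: zeros over
# [CLS] A [SEP] and ones over B [SEP].
def token_type_ids_sketch(len_a, len_b=None):
    first = [0] * (1 + len_a + 1)  # [CLS] + tokens_a + [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)  # tokens_b + [SEP]

assert token_type_ids_sketch(2, 3) == [0, 0, 0, 0, 1, 1, 1, 1]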
| 138
| 0
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for a, b in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , delta=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : int =GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : List[Any] =None
ops.enable_eager_execution_internal()
snake_case__ : Any =tf.config.list_physical_devices('''CPU''' )
if len(__SCREAMING_SNAKE_CASE ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
snake_case__ : Tuple =tf.config.list_logical_devices(device_type='''CPU''' )
snake_case__ : List[str] =tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
snake_case__ : List[str] =GradientAccumulator()
snake_case__ : Any =tf.Variable([4.0, 3.0] )
snake_case__, snake_case__ : str =create_optimizer(5e-5 , 10 , 5 )
snake_case__ : Optional[int] =tf.Variable([0.0, 0.0] , trainable=__SCREAMING_SNAKE_CASE )
def accumulate_on_replica(__SCREAMING_SNAKE_CASE ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
with strategy.scope():
snake_case__ : Tuple =strategy.experimental_local_results(__SCREAMING_SNAKE_CASE )
local_variables[0].assign(__SCREAMING_SNAKE_CASE )
local_variables[1].assign(__SCREAMING_SNAKE_CASE )
strategy.run(__SCREAMING_SNAKE_CASE , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__SCREAMING_SNAKE_CASE )
def _check_local_values(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple =strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __SCREAMING_SNAKE_CASE , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __SCREAMING_SNAKE_CASE , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
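# --- Hedged illustration (not part of the original test file) ---
# The bookkeeping the GradientAccumulator test above relies on, in plain
# Python: gradients are summed element-wise across steps and only applied
# (then reset) once enough micro-batches have been accumulated.
class AccumulatorSketch:
    def __init__(self):
        self.step, self.gradients = 0, None

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [0.0] * len(grads)
        self.gradients = [g + new for g, new in zip(self.gradients, grads)]
        self.step += 1

    def reset(self):
        self.step, self.gradients = 0, None

acc = AccumulatorSketch()
acc([1.0, 2.0]); acc([-2.0, 1.0]); acc([-1.0, 2.0])
assert acc.step == 3 and acc.gradients == [-2.0, 5.0]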
| 408
|
from torch import nn
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
super().__init__()
snake_case__ : Tuple =class_size
snake_case__ : List[Any] =embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
snake_case__ : Optional[Any] =nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
snake_case__ : str =self.mlp(__SCREAMING_SNAKE_CASE )
return logits
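# --- Hedged illustration (not part of the original module) ---
# A minimal, self-contained version of the head above: one linear layer mapping
# a pooled embedding to class logits. The sizes are illustrative.
import torch
from torch import nn

head = nn.Linear(768, 2)            # embed_size -> class_size
pooled = torch.randn(4, 768)        # e.g. mean-pooled hidden states
logits = head(pooled)               # shape (4, 2)
assert logits.shape == (4, 2)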
| 408
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on a sample image from the COCO test fixtures
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
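# --- Illustrative sketch (added; model id taken from the tests above) -----------
# End-to-end semantic segmentation outside the test harness: the processor
# resizes/normalizes the image, the model predicts per-class logit maps, and
# `post_process_semantic_segmentation` upsamples + argmaxes to a label map.
import torch
from PIL import Image

from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# image.size is (width, height); target_sizes expects (height, width)
label_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(label_map.shape)  # (height, width) tensor of predicted class ids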
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
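# --- Illustrative sketch (added) -------------------------------------------------
# The schedulers exported above are interchangeable at runtime via `from_config`.
# A common pattern is swapping a pipeline's default scheduler for a faster sampler;
# the Stable Diffusion checkpoint below is used purely for illustration.
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]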
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    """Parse common truthy/falsy strings into a bool (for argparse)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice's string representation back to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """Subclass of `argparse.ArgumentParser` that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
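# --- Illustrative sketch (added) -------------------------------------------------
# Typical use of the parser defined above: declare options as a dataclass, then
# let HfArgumentParser generate the CLI and hand back a populated instance.
from dataclasses import dataclass, field


@dataclass
class TrainingOptions:
    learning_rate: float = field(default=5e-5, metadata={"help": "Initial learning rate."})
    do_eval: bool = field(default=False, metadata={"help": "Run evaluation."})


parser = HfArgumentParser(TrainingOptions)
(options,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_eval"])
print(options.learning_rate, options.do_eval)  # 0.0001 True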
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
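# --- Illustrative sketch (added; checkpoint name is an assumption) ---------------
# Loading the pipeline exported above; "kakaobrain/karlo-v1-alpha" is the publicly
# released unCLIP checkpoint commonly used with UnCLIPPipeline.
import torch

from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
image = pipe("a photo of a red panda").images[0]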
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
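# --- Illustrative sketch (added) -------------------------------------------------
# Loading and running the metric defined above through the legacy
# `datasets.load_metric` API (newer code would use the `evaluate` library),
# mirroring the example given in _KWARGS_DESCRIPTION.
import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # {'sari': ..., 'sacrebleu': ..., 'exact': ...}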
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
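# --- Illustrative sketch (added; key and shapes are made-up example values) ------
# A 3D "kernel" is treated as an expert layer: the key is rewritten to "weight"
# and the tensor axes are permuted so the expert dimension stays leading.
import torch

example_key = ("encoder", "mlp", "kernel")
example_tensor = torch.zeros(8, 512, 2048)  # (num_experts, d_model, d_ff)
new_key, new_tensor = rename_base_flax_keys(example_key, example_tensor)
print(new_key, tuple(new_tensor.shape))  # ('encoder', 'mlp', 'weight') (8, 2048, 512)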
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, save_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(save_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path_ = os.path.join(
                save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path_)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path_ = os.path.join(save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path_)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(save_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(save_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(save_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    # Builds the tree:
    #       1
    #      / \
    #     2   3
    #    / \
    #   4   5
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[Any]:
    """Breadth-first traversal: node data level by level, left to right."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    """Returns the data of the nodes at the given level, from left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    """Returns the data of the nodes at the given level, from right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[Any]:
    """ZigZag traversal: alternates direction level by level, starting left to right."""
    if root is None:
        return []

    output: list[Any] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
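# --- Illustrative sketch (added) -------------------------------------------------
# The traversal helpers also work on hand-built trees, not just `make_tree`;
# an asymmetric example exercising the zig-zag ordering.
if __name__ == "__main__":
    example = Node(1, left=Node(2, right=Node(4)), right=Node(3))
    print(zigzag(example))  # [[1], [3, 2], [4]]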
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
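
# A hypothetical way to run a single parameterized case above (the test file
# path is an assumption; slow tests in transformers are opt-in via RUN_SLOW):
#
#   RUN_SLOW=1 pytest tests/fsmt/test_fsmt_bleu_score.py -k en_ru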
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path, pytorch_dump_folder_path ):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(f'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
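
# A hypothetical invocation of this script (the script name and paths are
# placeholders, not taken from the original file):
#
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-en-hf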
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :Tuple = find_backend(''' if not is_torch_available():''' )
self.assertEqual(snake_case , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
UpperCAmelCase_ :List[str] = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(snake_case , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
UpperCAmelCase_ :str = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(snake_case , '''torch_and_transformers_and_onnx''' )
def snake_case_ ( self : List[str] ):
UpperCAmelCase_ :Any = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , snake_case )
self.assertIn('''torch_and_transformers''' , snake_case )
self.assertIn('''flax_and_transformers''' , snake_case )
self.assertIn('''torch_and_transformers_and_onnx''' , snake_case )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(snake_case , '''\nCONSTANT = None\n''' )
UpperCAmelCase_ :Tuple = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
snake_case , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
UpperCAmelCase_ :int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
UpperCAmelCase_ :int = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(snake_case , snake_case )
def snake_case_ ( self : Any ):
UpperCAmelCase_ :int = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
UpperCAmelCase_ :Optional[int] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , snake_case )
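
# For context (a paraphrase of the convention the tests above rely on, not new
# behavior): find_backend turns a guard such as
#   if not (is_torch_available() and is_transformers_available()):
# into the backend key "torch_and_transformers" by joining the names of the
# `is_*_available` checks with "_and_".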
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__( self , unet: UNetaDModel , scheduler: KarrasVeScheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 50 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs ):
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
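
# A hypothetical end-to-end call (the class above corresponds to diffusers'
# KarrasVePipeline; the checkpoint id is illustrative):
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]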
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    model_type = "mobilenet_v2"

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        """simple docstring"""
        return 1e-4
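
# A small instantiation sketch (the first class above corresponds to
# transformers' MobileNetV2Config; the values are illustrative):
#
#   config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#   config.model_type                       # -> "mobilenet_v2"
#   MobileNetV2Config(depth_multiplier=0)   # raises ValueError via the guard above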
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __SCREAMING_SNAKE_CASE (A_ ):
"""simple docstring"""
__a ='''microsoft/speecht5_tts'''
__a =(
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
__a ='''text_reader'''
__a =SpeechTaProcessor
__a =SpeechTaForTextToSpeech
__a =SpeechTaHifiGan
__a =['''text''']
__a =['''audio''']
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
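
# A hypothetical use of the tool above (`TextReaderTool` is a stand-in name for
# the class, which appears under a placeholder identifier in this dump; the
# encode -> forward -> decode chaining comes from PipelineTool.__call__):
#
#   tool = TextReaderTool()
#   tool.setup()                      # also resolves the default HiFi-GAN vocoder
#   waveform = tool("Hello, world!")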
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
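
# A minimal sketch exercising the tree above; the expected values follow from
# summing slices of the backing array by hand.
def _fenwick_demo():
    fenwick = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert fenwick.prefix(3) == 6             # 1 + 2 + 3
    fenwick.add(2, 10)                        # arr[2] += 10
    assert fenwick.query(2, 4) == 17          # 13 + 4, i.e. sum over indices [2, 4)
    assert fenwick.get_array() == [1, 2, 13, 4, 5]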
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
__A = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path, enable_fusion=False):
    """simple docstring"""
    model, model_cfg = create_model(
        'HTSAT-tiny' , 'roberta' , checkpoint_path , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=enable_fusion , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """simple docstring"""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
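
# A hypothetical invocation (the script name and paths are placeholders):
#
#   python convert_clap_checkpoint.py \
#       --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion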
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__A = logging.get_logger(__name__)
enable_full_determinism()
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :str = UNetaDModel
__magic_name__ :Tuple = """sample"""
@property
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = 4
lowerCAmelCase__ :Dict = 3
lowerCAmelCase__ :int = (3_2, 3_2)
lowerCAmelCase__ :List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Any = torch.tensor([1_0] ).to(__UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = {
'block_out_channels': (3_2, 6_4),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 3_2,
}
lowerCAmelCase__ :int = self.dummy_input
return init_dict, inputs_dict
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :str = UNetaDModel
__magic_name__ :List[str] = """sample"""
@property
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = 4
lowerCAmelCase__ :List[Any] = 4
lowerCAmelCase__ :str = (3_2, 3_2)
lowerCAmelCase__ :Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = torch.tensor([1_0] ).to(__UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def snake_case ( self ):
'''simple docstring'''
return (4, 3_2, 3_2)
@property
def snake_case ( self ):
'''simple docstring'''
return (4, 3_2, 3_2)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = {
'sample_size': 3_2,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (3_2, 6_4),
'attention_head_dim': 3_2,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
lowerCAmelCase__ :Dict = self.dummy_input
return init_dict, inputs_dict
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :int = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase )
model_accelerate.to(__UpperCAmelCase )
model_accelerate.eval()
lowerCAmelCase__ :List[str] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase__ :List[str] = noise.to(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = torch.tensor([1_0] * noise.shape[0] ).to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model_accelerate(__UpperCAmelCase , __UpperCAmelCase )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase , low_cpu_mem_usage=__UpperCAmelCase )
model_normal_load.to(__UpperCAmelCase )
model_normal_load.eval()
lowerCAmelCase__ :Optional[int] = model_normal_load(__UpperCAmelCase , __UpperCAmelCase )['sample']
assert torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase__ :int = noise.to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = torch.tensor([1_0] * noise.shape[0] ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , __UpperCAmelCase ).sample
lowerCAmelCase__ :List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase__ :Tuple = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) )
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = UNetaDModel
__magic_name__ :Optional[int] = """sample"""
@property
def snake_case ( self , __UpperCAmelCase=(3_2, 3_2) ):
'''simple docstring'''
lowerCAmelCase__ :Dict = 4
lowerCAmelCase__ :int = 3
lowerCAmelCase__ :Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=__UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = {
'block_out_channels': [3_2, 6_4, 6_4, 6_4],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
lowerCAmelCase__ :Any = self.dummy_input
return init_dict, inputs_dict
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.dummy_input
lowerCAmelCase__ :Union[str, Any] = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = noise
lowerCAmelCase__ :List[Any] = model(**__UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = 4
lowerCAmelCase__ :Any = 3
lowerCAmelCase__ :Dict = (2_5_6, 2_5_6)
lowerCAmelCase__ :int = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Any = torch.tensor(batch_size * [1E-4] ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :str = model(__UpperCAmelCase , __UpperCAmelCase ).sample
lowerCAmelCase__ :Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ :int = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = 4
lowerCAmelCase__ :List[Any] = 3
lowerCAmelCase__ :Dict = (3_2, 3_2)
lowerCAmelCase__ :Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = torch.tensor(batch_size * [1E-4] ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Optional[Any] = model(__UpperCAmelCase , __UpperCAmelCase ).sample
lowerCAmelCase__ :Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ :Any = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) )
def snake_case ( self ):
'''simple docstring'''
pass
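
# A hypothetical invocation (the file path is an assumption; the GPU-only and
# @slow cases above are skipped unless their gates are satisfied):
#
#   pytest tests/models/test_models_unet_2d.py -k "ldm"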
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" ,action="store_true" ,help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" ,type=a_ ,required=a_ ,help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" ,type=a_ ,required=a_ ,help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." ,)
parser.add_argument(
"--student_type" ,type=a_ ,choices=["distilbert", "roberta", "gpt2"] ,required=a_ ,help="The student type (DistilBERT, RoBERTa)." ,)
parser.add_argument("--student_config" ,type=a_ ,required=a_ ,help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" ,default=a_ ,type=a_ ,help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" ,choices=["bert", "roberta", "gpt2"] ,required=a_ ,help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" ,type=a_ ,required=a_ ,help="The teacher model." )
parser.add_argument("--temperature" ,default=2.0 ,type=a_ ,help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" ,default=0.5 ,type=a_ ,help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" ,default=0.0 ,type=a_ ,help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." ,)
parser.add_argument("--alpha_clm" ,default=0.5 ,type=a_ ,help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" ,default=0.0 ,type=a_ ,help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" ,default=0.0 ,type=a_ ,help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" ,action="store_true" ,help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" ,default=0.1_5 ,type=a_ ,help="Proportion of tokens for which we need to make a prediction." ,)
parser.add_argument("--word_mask" ,default=0.8 ,type=a_ ,help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" ,default=0.1 ,type=a_ ,help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" ,default=0.1 ,type=a_ ,help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" ,default=0.7 ,type=a_ ,help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." ,)
parser.add_argument("--token_counts" ,type=a_ ,help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" ,action="store_true" ,help="If true, compute the distillation loss only the [MLM] prediction distribution." ,)
parser.add_argument(
"--freeze_pos_embs" ,action="store_true" ,help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." ,)
parser.add_argument(
"--freeze_token_type_embds" ,action="store_true" ,help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." ,)
parser.add_argument("--n_epoch" ,type=a_ ,default=3 ,help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" ,type=a_ ,default=5 ,help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" ,action="store_false" ,help="If true, group sequences that have similar length into the same batch. Default is true." ,)
parser.add_argument(
"--gradient_accumulation_steps" ,type=a_ ,default=5_0 ,help="Gradient accumulation for larger training batches." ,)
parser.add_argument("--warmup_prop" ,default=0.0_5 ,type=a_ ,help="Linear warmup proportion." )
parser.add_argument("--weight_decay" ,default=0.0 ,type=a_ ,help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" ,default=5e-4 ,type=a_ ,help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" ,default=1e-6 ,type=a_ ,help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" ,default=5.0 ,type=a_ ,help="Max gradient norm." )
parser.add_argument("--initializer_range" ,default=0.0_2 ,type=a_ ,help="Random initialization range." )
parser.add_argument(
"--fp16" ,action="store_true" ,help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" ,)
parser.add_argument(
"--fp16_opt_level" ,type=a_ ,default="O1" ,help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) ,)
parser.add_argument("--n_gpu" ,type=a_ ,default=1 ,help="Number of GPUs in the node." )
parser.add_argument("--local_rank" ,type=a_ ,default=-1 ,help="Distributed training - Local rank" )
parser.add_argument("--seed" ,type=a_ ,default=5_6 ,help="Random seed" )
parser.add_argument("--log_interval" ,type=a_ ,default=5_0_0 ,help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" ,type=a_ ,default=4_0_0_0 ,help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not specified whether'''
                    " to overwrite it. Use `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path ,"parameters.json" ) ,"w" ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
logger.info(f'''Special tokens {special_tok_ids}''' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''')
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("Data loader created." )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''')
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(f'''cuda:{args.local_rank}''' )
logger.info("Student loaded." )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(f'''cuda:{args.local_rank}''' )
logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
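
# A hypothetical launch command (paths are placeholders; note that --mlm requires
# alpha_mlm > 0 and alpha_clm == 0, per the sanity checks above):
#
#   python train.py --force \
#       --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#       --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
#       --dump_path serialization_dir/my_run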
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
def solution(limit=1000000) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime; sieve phi over its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
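
# A hand-checkable case for the sieve above: phi(2..8) = 1, 2, 2, 4, 2, 6, 4,
# which sums to 21, so solution(8) == 21.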
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
    model_type = "realm"

    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
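
# A minimal instantiation sketch (the class above corresponds to transformers'
# RealmConfig; the arguments shown are a subset, chosen for illustration):
#
#   config = RealmConfig(num_candidates=4, reader_beam_size=3)
#   assert config.model_type == "realm"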
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
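
# A minimal sketch of the API the tests above exercise (the path is a placeholder):
def _filelock_usage(path="demo.lock"):
    lock = FileLock(path)
    with lock.acquire(timeout=1):
        # critical section: another process contending for `path` would block
        # here and raise Timeout after roughly one second
        pass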
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class _snake_case ( _lowercase ):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
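
# A small round-trip sketch (the first class above corresponds to transformers'
# DetrConfig; the values are illustrative):
#
#   config = DetrConfig(num_queries=50)
#   d = config.to_dict()   # a nested backbone_config, when set, is serialized too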
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ):
def get_masked_lm_array(__A ):
UpperCAmelCase = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase = tf.train.load_variable(__A , __A )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(__A )
def get_encoder_array(__A ):
UpperCAmelCase = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase = tf.train.load_variable(__A , __A )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(__A )
def get_encoder_layer_array(__A , __A ):
UpperCAmelCase = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase = tf.train.load_variable(__A , __A )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(__A )
def get_encoder_attention_layer_array(__A , __A , __A ):
UpperCAmelCase = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase = tf.train.load_variable(__A , __A )
UpperCAmelCase = array.reshape(__A )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(__A )
print(F"Loading model based on config from {config_path}..." )
UpperCAmelCase = BertConfig.from_json_file(__A )
UpperCAmelCase = BertForMaskedLM(__A )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
UpperCAmelCase = model.bert.encoder.layer[layer_index]
# Self-attention
UpperCAmelCase = layer.attention.self
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_query_dense/kernel" , self_attn.query.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_query_dense/bias" , self_attn.query.bias.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_key_dense/kernel" , self_attn.key.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_key_dense/bias" , self_attn.key.bias.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_value_dense/kernel" , self_attn.value.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_value_dense/bias" , self_attn.value.bias.data.shape )
# Self-attention Output
UpperCAmelCase = layer.attention.output
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_output_dense/kernel" , self_output.dense.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
__A , "_output_dense/bias" , self_output.dense.bias.data.shape )
UpperCAmelCase = get_encoder_layer_array(__A , "_attention_layer_norm/gamma" )
UpperCAmelCase = get_encoder_layer_array(__A , "_attention_layer_norm/beta" )
# Intermediate
UpperCAmelCase = layer.intermediate
UpperCAmelCase = get_encoder_layer_array(__A , "_intermediate_dense/kernel" )
UpperCAmelCase = get_encoder_layer_array(__A , "_intermediate_dense/bias" )
# Output
UpperCAmelCase = layer.output
UpperCAmelCase = get_encoder_layer_array(__A , "_output_dense/kernel" )
UpperCAmelCase = get_encoder_layer_array(__A , "_output_dense/bias" )
UpperCAmelCase = get_encoder_layer_array(__A , "_output_layer_norm/gamma" )
UpperCAmelCase = get_encoder_layer_array(__A , "_output_layer_norm/beta" )
# Embeddings
UpperCAmelCase = get_encoder_array("_position_embedding_layer/embeddings" )
UpperCAmelCase = get_encoder_array("_type_embedding_layer/embeddings" )
UpperCAmelCase = get_encoder_array("_embedding_norm_layer/gamma" )
UpperCAmelCase = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
UpperCAmelCase = model.cls.predictions.transform
UpperCAmelCase = get_masked_lm_array("dense/kernel" )
UpperCAmelCase = get_masked_lm_array("dense/bias" )
UpperCAmelCase = get_masked_lm_array("layer_norm/gamma" )
UpperCAmelCase = get_masked_lm_array("layer_norm/beta" )
UpperCAmelCase = get_masked_lm_array("embedding_table" )
# Pooling
UpperCAmelCase = BertPooler(config=__A )
UpperCAmelCase = get_encoder_array("_pooler_layer/kernel" )
UpperCAmelCase = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(__A )
# Integration test - should load without any errors ;)
UpperCAmelCase = BertForMaskedLM.from_pretrained(__A )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
lowerCAmelCase__ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
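# Example invocation (added; script file name and paths are illustrative):
#   python convert_token_dropping_bert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir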
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    # Dataset files are named "<label>_<index>.jpg"; recover the label part.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
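# Example (added note): extract_label("images/great_pyrenees_173.jpg") returns
# "great_pyrenees"; the regex captures everything before the final "_<number>.jpg".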
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
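# Example invocation (added; the script file name is illustrative):
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch --with_tracking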
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of observing exactly `successes` successes in
    `trials` independent Bernoulli trials with success probability `prob`."""
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if successes > trials:
        raise ValueError('successes must be less than or equal to trials')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in the range (0, 1)')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
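    # Worked example (added note): for 2 successes in 4 trials at prob = 0.75,
    # C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.03515625 = 0.2109375, which is what the
    # call above prints. A quick sanity check is that the PMF sums to 1:
    #     assert abs(sum(binomial_distribution(k, 10, 0.3) for k in range(11)) - 1.0) < 1e-9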
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # "J'aime le camembert !" (French for "I love camembert!")
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
def __magic_name__ ( self ):
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = random_attention_mask([self.batch_size, self.seq_length] )
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ = ids_tensor([self.batch_size] , 2 ).float()
a_ = ids_tensor([self.batch_size] , self.num_choices )
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
a_ = FlaubertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
a_ = model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
a_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
a_ = FlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
a_ = FlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE )
a_ = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
a_ = FlaubertForQuestionAnswering(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE )
a_ = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , )
a_ = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
a_ = FlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE )
a_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
a_ = self.num_labels
a_ = FlaubertForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def __magic_name__ ( self ):
self.config_tester.run_common_tests()
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def __magic_name__ ( self ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def __magic_name__ ( self ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=_SCREAMING_SNAKE_CASE )
a_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = torch.jit.trace(
_SCREAMING_SNAKE_CASE , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """traced_model.pt""" ) )
a_ = torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , """traced_model.pt""" ) , map_location=_SCREAMING_SNAKE_CASE )
loaded(inputs_dict["""input_ids"""].to(_SCREAMING_SNAKE_CASE ) , inputs_dict["""attention_mask"""].to(_SCREAMING_SNAKE_CASE ) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ):
a_ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
a_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
a_ = model(_SCREAMING_SNAKE_CASE )[0]
a_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
a_ = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
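# For reference, a minimal sketch of one helper exercised above, inferred purely
# from the expected values in these tests (the real implementation lives in
# utils_summarization and may differ in detail):
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    # Truncate to block_size, or right-pad with pad_token_id up to block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))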
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board, row, column):
    # Check this row on the left, this column above, and both upper diagonals.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    if row >= len(board):
        # Store a copy; the board itself is mutated during backtracking.
        solution.append([r[:] for r in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True, ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
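# Minimal usage sketch (added; the model id is illustrative): loading a
# pretrained DDPM checkpoint works here because __init__ converts whatever
# scheduler the checkpoint ships with into a DDIMScheduler.
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")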
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # Attribute name reconstructed (assumption); the obfuscated source only kept the value.
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn='gelu-approximate', num_embeds_ada_norm=1_000, norm_type='ada_norm_zero', norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = torch.manual_seed(0 )
_snake_case : Optional[int] = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_snake_case : int = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_snake_case : int = pipe.get_label_ids(lowerCAmelCase__ )
_snake_case : List[str] = pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_snake_case : str = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : List[Any] = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_snake_case : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_snake_case : List[Any] = ['''vase''', '''umbrella''']
_snake_case : List[Any] = pipe.get_label_ids(lowerCAmelCase__ )
_snake_case : List[str] = torch.manual_seed(0 )
_snake_case : Dict = pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_snake_case : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal '
    'distribution is :'
)
print(z)
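# Rough expectation (added note): randomized quicksort performs about
# 2 * n * ln(n) comparisons on average, i.e. roughly 2 * 100 * ln(100) ≈ 921
# for the 100-element array above, so z should land in that neighborhood.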
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )

        return (config, input_ids_1, input_ids_2, lm_labels)
def __snake_case ( self : Any ) -> Union[str, Any]:
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __snake_case ( self : str , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : str ) -> Optional[Any]:
__snake_case : List[str] = TFTransfoXLModel(lowerCamelCase )
__snake_case , __snake_case : str = model(lowerCamelCase ).to_tuple()
__snake_case : Optional[Any] = {"input_ids": input_ids_a, "mems": mems_a}
__snake_case , __snake_case : List[Any] = model(lowerCamelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __snake_case ( self : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : int ) -> Dict:
__snake_case : int = TFTransfoXLLMHeadModel(lowerCamelCase )
__snake_case , __snake_case : str = model(lowerCamelCase ).to_tuple()
__snake_case : Tuple = {"input_ids": input_ids_a, "labels": lm_labels}
__snake_case , __snake_case : List[Any] = model(lowerCamelCase ).to_tuple()
__snake_case , __snake_case : Union[str, Any] = model([input_ids_a, mems_a] ).to_tuple()
__snake_case : List[str] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
__snake_case , __snake_case : List[Any] = model(lowerCamelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : str , lowerCamelCase : Optional[int] ) -> List[str]:
__snake_case : str = TFTransfoXLForSequenceClassification(lowerCamelCase )
__snake_case : Optional[int] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    # Flag names reconstructed (assumption); the obfuscated source only kept the values.
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
def __snake_case ( self : int ) -> List[Any]:
self.config_tester.run_common_tests()
def __snake_case ( self : List[Any] ) -> Dict:
self.model_tester.set_seed()
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase )
def __snake_case ( self : Optional[int] ) -> Optional[Any]:
self.model_tester.set_seed()
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase )
def __snake_case ( self : Optional[Any] ) -> Tuple:
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase )
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def __snake_case ( self : List[Any] ) -> List[Any]:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __snake_case ( self : Optional[int] ) -> Dict:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Union[str, Any] = TFTransfoXLModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def __snake_case ( self : str ) -> Optional[int]:
pass
@require_tf
class a (unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def __snake_case ( self : List[Any] ) -> Tuple:
__snake_case : Dict = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
__snake_case : Union[str, Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__snake_case : List[str] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
__snake_case : Optional[int] = model.generate(lowerCamelCase , max_length=200 , do_sample=lowerCamelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase )
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    # Default masks: attend to every non-pad token; the first decoder position
    # (the decoder start token) is always attended to.
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
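
# --- illustrative sketch, not part of the original test suite ---------------
# Shows how the default attention mask above falls out of the pad token id.
# ``_demo_default_attention_mask`` and its literal values are assumptions made
# purely for this example; the tests never call it.
def _demo_default_attention_mask():
    pad_token_id = 1  # Blenderbot's pad id, matching the tester above
    input_ids = tf.constant([[5, 7, pad_token_id, pad_token_id]])
    # real tokens -> 1, padding -> 0, i.e. [[1, 1, 0, 0]]
    return tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
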
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 33
| 0
|
"""Multi-GPU generation/evaluation for seq2seq models: every rank generates on its shard of the
dataset and writes a rank_<i>_output.json file, then rank 0 gathers the shards and computes metrics."""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)
def eval_data_dir(
    data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024,
    type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None,
    num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs,
):
    """Run generation on this rank's shard of the dataset and save the predictions to save_dir."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        # generate() requires num_beams >= num_return_sequences for beam search
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
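
# --- illustrative sketch, not part of the original script -------------------
# What ``chunks`` (imported from utils) does to the flat prediction list when
# num_return_sequences > 1. ``_demo_chunks`` is a hypothetical stand-in written
# only for this example.
def _demo_chunks(flat_preds, n):
    # e.g. ["p0", "p1", "p2", "p3"] with n=2 -> [["p0", "p1"], ["p2", "p3"]],
    # i.e. one group of n candidate generations per source example
    return [flat_preds[i : i + n] for i in range(0, len(flat_preds), n)]
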
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn, t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate, typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
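
# Hypothetical invocation, for illustration only. The launcher semantics follow
# torch.distributed.launch (which supplies --local_rank to each process, as the
# --local_rank help text above notes); the script file name is an assumption:
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir cnn_dm \
#       --save_dir tmp_gen --bs 8 --num_beams=4
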
def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank record lists into one and sort back into dataset order by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
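
# Illustrative only: with two ranks the shards may arrive out of order, e.g.
#   [[{"pred": "b", "id": 1}], [{"pred": "a", "id": 0}]] -> ["a", "b"]
# after the sort-by-id above restores the original dataset order.
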
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files: poll until every rank has written its shard
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        # while/else: this branch runs only when the loop condition goes False
        # without hitting the ``return`` above, i.e. when the deadline passes.
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 305
|
"""OwlViT model configuration."""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12,
        num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu",
        layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02,
        initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12,
        num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu",
        layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02,
        initializer_factor=1.0, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=512,
        logit_scale_init_value=2.6592, return_dict=True, **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        # compose the full config from the two sub-config dicts
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
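
# Illustrative usage sketch, not part of this module: composing a full OwlViT
# config from the two sub-configs defined above.
#   text_config = OwlViTTextConfig().to_dict()
#   vision_config = OwlViTVisionConfig().to_dict()
#   config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)
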
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        # Merge tokenizer-generated text inputs with image-processor-generated
        # pixel values into a single dummy-input dict for export.
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
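
# Illustrative only, with hedged assumptions: pairing this export config with a
# processor to build dummy inputs. ``processor`` is assumed to be an
# OwlViTProcessor exposing ``tokenizer`` and ``image_processor`` attributes.
#   onnx_config = OwlViTOnnxConfig(config)
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor)
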
| 305
| 1
|