| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Make the backend pre-tokenizer honour `add_prefix_space` if it differs
        # from the serialized tokenizer state.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
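# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): typical usage of the class
# above, assuming the public "gpt2" checkpoint is available.
#
#     from transformers import GPT2TokenizerFast
#
#     tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#     enc = tokenizer("Hello world")
#     tokenizer.decode(enc["input_ids"])  # -> "Hello world"
# ---------------------------------------------------------------------------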
| 64
|
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ : Optional[int] = [True] * 1_00_00_01
lowerCamelCase__ : List[Any] = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
lowerCamelCase__ : Optional[Any] = False
i += 1
def UpperCamelCase ( _lowerCAmelCase : int ) -> bool:
return seive[n]
def UpperCamelCase ( _lowerCAmelCase : int ) -> bool:
return any(digit in """02468""" for digit in str(_lowerCAmelCase ) )
def UpperCamelCase ( _lowerCAmelCase : int = 1000000 ) -> list[int]:
_UpperCAmelCase : List[Any] = [2] # result already includes the number 2.
for num in range(3, limit + 1, 2 ):
if is_prime(_lowerCAmelCase ) and not contains_an_even_digit(_lowerCAmelCase ):
_UpperCAmelCase : List[Any] = str(_lowerCAmelCase )
_UpperCAmelCase : Optional[int] = [int(str_num[j:] + str_num[:j] ) for j in range(len(_lowerCAmelCase ) )]
if all(is_prime(_lowerCAmelCase ) for i in list_nums ):
result.append(_lowerCAmelCase )
return result
def UpperCamelCase ( ) -> int:
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
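# Editor's addition (hedged sketch): a quick sanity check of find_circular_primes;
# the circular primes below 100 form a well-known sequence.
if __name__ == "__main__":
    assert find_circular_primes(100) == [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]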
| 238
| 0
|
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeautifulSoup(requests.get(__UpperCamelCase , params=__UpperCamelCase ).content , """html.parser""" )
UpperCAmelCase__ : Any = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
UpperCAmelCase__ : List[Any] = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
__UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 194
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping between utf-8 bytes and printable unicode strings, avoiding
    whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
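# ---------------------------------------------------------------------------
# Editor's sketch (not in the original module): the LED-specific piece is the
# `_pad` override above, which extends a user-supplied `global_attention_mask`
# with -1 ("local attention") so it keeps the same length as `input_ids`.
# The checkpoint name and call pattern below are illustrative assumptions.
#
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tok("a long document")
#     enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#     padded = tok.pad(enc, padding="max_length", max_length=32)
#     # padded["global_attention_mask"] now ends in -1s up to length 32
# ---------------------------------------------------------------------------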
| 194
| 1
|
"""simple docstring"""
import requests
def __magic_name__ ( __snake_case : str , __snake_case : str ) -> None:
lowercase : Optional[int] = {"Content-Type": "application/json"}
lowercase : Optional[int] = requests.post(__snake_case , json={"text": message_body} , headers=__snake_case )
if response.status_code != 200:
lowercase : List[Any] = (
"Request to slack returned an error "
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(__snake_case )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 361
|
"""simple docstring"""
def __magic_name__ ( __snake_case : int , __snake_case : int ) -> Any:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(__snake_case , int(b / 2 ) ) * actual_power(__snake_case , int(b / 2 ) )
else:
return a * actual_power(__snake_case , int(b / 2 ) ) * actual_power(__snake_case , int(b / 2 ) )
def __magic_name__ ( __snake_case : int , __snake_case : int ) -> float:
if b < 0:
return 1 / actual_power(__snake_case , __snake_case )
return actual_power(__snake_case , __snake_case )
if __name__ == "__main__":
print(power(-2, -3))
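# Editor's addition (hedged sketch): `actual_power` halves the exponent at each
# step, so computing a**b takes O(log b) multiplications rather than b of them.
if __name__ == "__main__":
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125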
| 361
| 1
|
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
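# Editor's addition (hedged sketch): positive inputs pass through unchanged,
# while negative inputs saturate toward -alpha.
if __name__ == "__main__":
    print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))
    # -> [ 2.3, 0.6, -0.2594, -0.2933 ] (approximately)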
| 712
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 89
| 0
|
from manim import *
class CheckpointLoadingScene(Scene):
    # Editor's note: this scene was recovered from a badly garbled dump. Object
    # names, the original class name, and the manim direction constants
    # (UP/RIGHT/DOWN/LEFT) were lost and are reconstructed here by inference
    # from the surviving layout and animation calls.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        chkpt_base = [mem.copy() for i in range(6)]
        chkpt_rect = VGroup(*chkpt_base).arrange(RIGHT, buff=0)
        chkpt_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(chkpt_rect, chkpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(chkpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key, key_text)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))
        step_2 = MarkupText(
            "Then, the checkpoint is removed from memory\nthrough garbage collection.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))
        self.play(
            FadeOut(chkpt_rect, chkpt_text, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
| 157
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-bit string of bits to little endian (byte-chunk reversal)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert an int to its little-endian hex representation (8 hex chars)."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append the original length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit unsigned integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 128-bit MD5 digest of `message` as 32 ascii hex bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
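# Editor's addition (hedged sketch): cross-check this pure-Python MD5 against
# the standard library implementation.
if __name__ == "__main__":
    import hashlib

    message = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")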
| 157
| 1
|
def sum_of_digits(n: int) -> int:
    """Iteratively sum the digits of |n|."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Peel one digit per call; delegates the rest to sum_of_digits."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner using a string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 561
|
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f"{solution() = }")
| 561
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666
|
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking helper for the open knight tour problem."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
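# Editor's addition (hedged sketch): an open knight tour exists on a 5x5 board;
# each cell of the returned matrix holds the move number on which it is visited.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)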
| 666
| 1
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name: str) -> EfficientNetConfig:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name: str) -> EfficientNetImageProcessor:
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    # Map TF block indices ("1a", "2b", ...) onto consecutive HF block indices.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # Classifier head (the dump lost these TF keys; names below are reconstructed)
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 710
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 528
| 0
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive fixed-size tuples from an iterable."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, drop non-letters, and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair key table (I and J share a cell)."""
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
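# A minimal usage sketch: round-trip a message through the cipher. Note the
# decoded text keeps the upper-casing and X padding added by prepare_input.
if __name__ == "__main__":
    encoded = encode("hide the gold", "playfair example")
    print(encoded)
    print(decode(encoded, "playfair example"))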
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
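# A quick sanity check (minimal sketch): the two helpers are inverses of each
# other for any UTF-8 text, since they wrap base64.a85encode/a85decode.
assert base85_decode(base85_encode("base 85")) == "base 85"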
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _UpperCAmelCase ( unittest.TestCase , UpperCamelCase_):
def lowerCamelCase__ ( self ):
_snake_case : int = load_tool("text-classification" )
self.tool.setup()
_snake_case : Union[str, Any] = load_tool("text-classification" , remote=snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : int = self.tool("That\'s quite cool" , ["positive", "negative"] )
self.assertEqual(snake_case_ , "positive" )
def lowerCamelCase__ ( self ):
_snake_case : int = self.remote_tool("That\'s quite cool" , ["positive", "negative"] )
self.assertEqual(snake_case_ , "positive" )
def lowerCamelCase__ ( self ):
_snake_case : str = self.tool(text="That\'s quite cool" , labels=["positive", "negative"] )
self.assertEqual(snake_case_ , "positive" )
def lowerCamelCase__ ( self ):
_snake_case : str = self.remote_tool(text="That\'s quite cool" , labels=["positive", "negative"] )
self.assertEqual(snake_case_ , "positive" )
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_ ):
_snake_case , _snake_case : Dict = text, pattern
_snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self ):
# searches pattern in text and returns index positions
_snake_case : List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
_snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ )
if mismatch_index == -1:
positions.append(snake_case_ )
else:
_snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] )
_snake_case : Tuple = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_a : List[Any] = """ABAABA"""
_a : str = """AB"""
_a : List[Any] = BoyerMooreSearch(text, pattern)
_a : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
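# For the demo input above ("AB" inside "ABAABA"), the matches start at
# indices 0 and 3, so the expected output is:
#   Pattern found in following positions:
#   [0, 3]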
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
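# A minimal usage sketch (assumes the transformers package is installed and the
# checkpoint above is reachable):
# from transformers import XmodConfig, XmodModel
# config = XmodConfig.from_pretrained("facebook/xmod-base")
# model = XmodModel(config)  # randomly initialized weights with X-MOD's shapes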
import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class BifidCipher:
    """Bifid cipher over the 5x5 Polybius square above ('j' is merged into 'i')."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of a letter in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based (row, column) coordinates in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # write each letter's (row, column) coordinates column-wise into a 2xN grid
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # flatten row-major (all rows, then all columns) and re-pair the digits
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # write each ciphertext letter's coordinates sequentially into a flat array
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # reshape back to 2xN: row 0 holds the plaintext rows, row 1 the columns
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
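# A minimal round-trip check: the input below avoids 'j' and spaces, which
# encode() normalizes away, so decode(encode(x)) == x should hold.
if __name__ == "__main__":
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"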
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(f"{solution() = }")
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
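# A minimal usage sketch (assumes two small in-memory datasets with the same features):
# from datasets import Dataset
# ds1 = Dataset.from_dict({"a": [0, 1, 2]})
# ds2 = Dataset.from_dict({"a": [10, 11, 12]})
# concatenate_datasets([ds1, ds2])["a"]  # [0, 1, 2, 10, 11, 12]
# interleave_datasets([ds1, ds2])["a"]   # alternates: [0, 10, 1, 11, 2, 12]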
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
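# Usage note: because of the _LazyModule indirection above, importing this
# package is cheap; the heavy torch/TF/flax submodules are only resolved on
# first attribute access, e.g. `from transformers.models.roformer import RoFormerModel`.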
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Dict = logging.get_logger(__name__)
class a_ ( __lowercase ):
a : Union[str, Any] = 'timm_backbone'
def __init__( self : int , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : int=3 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Union[str, Any] , ) ->List[str]:
'''simple docstring'''
super().__init__(**_A )
_UpperCAmelCase = backbone
_UpperCAmelCase = num_channels
_UpperCAmelCase = features_only
_UpperCAmelCase = use_pretrained_backbone
_UpperCAmelCase = True
_UpperCAmelCase = out_indices if out_indices is not None else (-1,)
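# A minimal usage sketch (hypothetical values; `backbone` takes a timm model name):
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))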
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase , cross_attention_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__UpperCamelCase )
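# Usage note (an assumption based on where this dual-transformer design appears
# in diffusers, namely VersatileDiffusion): pipelines set `mix_ratio` at
# inference time; 0.5 blends the two conditions' residuals equally.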
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self : Any , _lowercase : int , _lowercase : Optional[Any]=None , _lowercase : Optional[Any]=None ):
__UpperCAmelCase = data
__UpperCAmelCase = previous
__UpperCAmelCase = next_node
def __str__( self : Optional[int] ):
return F'''{self.data}'''
def a ( self : Tuple ):
return self.data
def a ( self : Any ):
return self.next
def a ( self : Any ):
return self.previous
class _UpperCAmelCase :
def __init__( self : int , _lowercase : List[Any] ):
__UpperCAmelCase = head
def __iter__( self : str ):
return self
def a ( self : Optional[int] ):
if not self.current:
raise StopIteration
else:
__UpperCAmelCase = self.current.get_data()
__UpperCAmelCase = self.current.get_next()
return value
class _UpperCAmelCase :
def __init__( self : int ):
__UpperCAmelCase = None # First node in list
__UpperCAmelCase = None # Last node in list
def __str__( self : List[str] ):
__UpperCAmelCase = self.head
__UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
__UpperCAmelCase = current.get_next()
return " ".join(str(_lowercase ) for node in nodes )
def __contains__( self : int , _lowercase : int ):
__UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
__UpperCAmelCase = current.get_next()
return False
def __iter__( self : Union[str, Any] ):
return LinkedListIterator(self.head )
def a ( self : Optional[Any] ):
if self.head:
return self.head.get_data()
return None
def a ( self : Union[str, Any] ):
if self.tail:
return self.tail.get_data()
return None
def a ( self : Optional[int] , _lowercase : Node ):
if self.head is None:
__UpperCAmelCase = node
__UpperCAmelCase = node
else:
self.insert_before_node(self.head , _lowercase )
def a ( self : Optional[Any] , _lowercase : Node ):
if self.head is None:
self.set_head(_lowercase )
else:
self.insert_after_node(self.tail , _lowercase )
def a ( self : Optional[int] , _lowercase : int ):
__UpperCAmelCase = Node(_lowercase )
if self.head is None:
self.set_head(_lowercase )
else:
self.set_tail(_lowercase )
def a ( self : Optional[Any] , _lowercase : Node , _lowercase : Node ):
__UpperCAmelCase = node
__UpperCAmelCase = node.previous
if node.get_previous() is None:
__UpperCAmelCase = node_to_insert
else:
__UpperCAmelCase = node_to_insert
__UpperCAmelCase = node_to_insert
def a ( self : Optional[Any] , _lowercase : Node , _lowercase : Node ):
__UpperCAmelCase = node
__UpperCAmelCase = node.next
if node.get_next() is None:
__UpperCAmelCase = node_to_insert
else:
__UpperCAmelCase = node_to_insert
__UpperCAmelCase = node_to_insert
def a ( self : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = 1
__UpperCAmelCase = Node(_lowercase )
__UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(_lowercase , _lowercase )
return
current_position += 1
__UpperCAmelCase = node.next
self.insert_after_node(self.tail , _lowercase )
def a ( self : Optional[int] , _lowercase : int ):
__UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
__UpperCAmelCase = node.get_next()
raise Exception('''Node not found''' )
def a ( self : Any , _lowercase : str ):
if (node := self.get_node(_lowercase )) is not None:
if node == self.head:
__UpperCAmelCase = self.head.get_next()
if node == self.tail:
__UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(_lowercase )
@staticmethod
def a ( _lowercase : Node ):
if node.get_next():
__UpperCAmelCase = node.previous
if node.get_previous():
__UpperCAmelCase = node.next
__UpperCAmelCase = None
__UpperCAmelCase = None
def a ( self : str ):
return self.head is None
def lowercase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
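# A minimal usage sketch of the list above:
# linked_list = LinkedList()
# for value in (1, 2, 3):
#     linked_list.insert(value)
# print(linked_list)       # "1 2 3"
# linked_list.remove_node(2)
# print(2 in linked_list)  # False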
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        sequences = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(sequences)
        decoded_tok = tokenizer.batch_decode(sequences)
        decoded_tok = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_tok)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCamelCase : int = 16
__UpperCamelCase : str = 32
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = 16 ):
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE : Any = DatasetDict(
{
'''train''': dataset['''train'''].select(_lowercase ),
'''validation''': dataset['''train'''].select(_lowercase ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : Union[str, Any] = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : str = 8
else:
SCREAMING_SNAKE_CASE : Any = None
return tokenizer.pad(
_lowercase , padding='''longest''' , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE : int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE : str = DataLoader(
tokenized_datasets['''test'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def A ( _lowercase , _lowercase ):
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# Download the dataset
SCREAMING_SNAKE_CASE : Dict = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
SCREAMING_SNAKE_CASE : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Optional[Any] = config['''lr''']
SCREAMING_SNAKE_CASE : Optional[int] = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE : Dict = int(config['''seed'''] )
SCREAMING_SNAKE_CASE : int = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE : str = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE : int = MAX_GPU_BATCH_SIZE
set_seed(_lowercase )
# New Code #
# Create our folds:
SCREAMING_SNAKE_CASE : Dict = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
SCREAMING_SNAKE_CASE : List[Any] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = get_fold_dataloaders(
_lowercase , _lowercase , _lowercase , _lowercase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=_lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=100 , num_training_steps=(len(_lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Now we train the model
for epoch in range(_lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = outputs.loss
SCREAMING_SNAKE_CASE : str = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE : Tuple = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
SCREAMING_SNAKE_CASE : Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _lowercase )
# New Code #
# We also run predictions on the test set at the very end
SCREAMING_SNAKE_CASE : Dict = []
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**_lowercase )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowercase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat(_lowercase , dim=0 )
SCREAMING_SNAKE_CASE : int = torch.stack(_lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = metric.compute(predictions=_lowercase , references=_lowercase )
accelerator.print('''Average test metrics from all folds:''' , _lowercase )
def A ( ):
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_lowercase , default=_lowercase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=_lowercase , default=3 , help='''The number of splits to perform across the dataset''' )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 34
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 1
|
"""simple docstring"""
from functools import lru_cache
def lowerCAmelCase__ ( __magic_name__ ) ->set:
__lowercase = 2
__lowercase = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
@lru_cache
def lowerCAmelCase__ ( __magic_name__ ) ->int:
return len(unique_prime_factors(__magic_name__ ) )
def lowerCAmelCase__ ( __magic_name__ ) ->bool:
return len(set(__magic_name__ ) ) in (0, 1)
def lowerCAmelCase__ ( __magic_name__ ) ->list:
__lowercase = 2
while True:
# Increment each value of a generated range
__lowercase = [base + i for i in range(__magic_name__ )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
__lowercase = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def lowerCAmelCase__ ( __magic_name__ = 4 ) ->int:
__lowercase = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
if __name__ == "__main__":
print(solution())
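# A readable sketch of the trial-division factorisation implemented above
# (illustrative helper with explicit names; not used by the solution itself).
def _unique_prime_factors_demo(n: int) -> set:
    i, factors = 2, set()
    while i * i <= n:
        if n % i:  # i does not divide n: try the next candidate
            i += 1
        else:  # i divides n: record it and divide it out
            n //= i
            factors.add(i)
    if n > 1:  # any leftover factor greater than 1 is itself prime
        factors.add(n)
    return factors

# e.g. _unique_prime_factors_demo(644) == {2, 7, 23}: three distinct prime factors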
| 118
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ->float:
__lowercase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__magic_name__ )] )
__lowercase = np.array(__magic_name__ )
__lowercase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __magic_name__ ) ) , x.transpose() ) , __magic_name__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->float:
__lowercase = (1, 2, 1)
__lowercase = (1, 1, 0, 7)
__lowercase = SARIMAX(
__magic_name__ , exog=__magic_name__ , order=__magic_name__ , seasonal_order=__magic_name__ )
__lowercase = model.fit(disp=__magic_name__ , maxiter=6_0_0 , method="nm" )
__lowercase = model_fit.predict(1 , len(__magic_name__ ) , exog=[test_match] )
return result[0]
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->float:
__lowercase = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__magic_name__ , __magic_name__ )
__lowercase = regressor.predict(__magic_name__ )
return y_pred[0]
def lowerCAmelCase__ ( __magic_name__ ) ->float:
train_user.sort()
__lowercase = np.percentile(__magic_name__ , 2_5 )
__lowercase = np.percentile(__magic_name__ , 7_5 )
# IQR is the 75th percentile minus the 25th percentile; the two values must stay distinct
__lowercase = qa_high - qa_low
__lowercase = qa_low - (iqr * 0.1)
return low_lim
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->bool:
__lowercase = 0
__lowercase = 0
for i in list_vote:
if i > actual_result:
__lowercase = not_safe + 1
else:
if abs(abs(__magic_name__ ) - abs(__magic_name__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_lowercase = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
_lowercase = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
_lowercase = Normalizer().fit_transform(data_input_df.values)
# split data
_lowercase = normalize_df[:, 2].tolist()
_lowercase = normalize_df[:, 0].tolist()
_lowercase = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_lowercase = normalize_df[:, [1, 2]].tolist()
_lowercase = x[: len(x) - 1]
_lowercase = x[len(x) - 1 :]
# for linear regression & sarimax
_lowercase = total_date[: len(total_date) - 1]
_lowercase = total_user[: len(total_user) - 1]
_lowercase = total_match[: len(total_match) - 1]
_lowercase = total_date[len(total_date) - 1 :]
_lowercase = total_user[len(total_user) - 1 :]
_lowercase = total_match[len(total_match) - 1 :]
# voting system with forecasting
_lowercase = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_lowercase = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print(f'''Today\'s data is {not_str}safe.''')
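# A small, self-contained sketch of the interquartile-range lower bound the
# quartile helper above computes (illustrative names, toy data): the bound is
# the 25th percentile minus one tenth of the IQR.
def _iqr_lower_bound_demo(values):
    import numpy as np
    q_low, q_high = np.percentile(values, 25), np.percentile(values, 75)
    return q_low - (q_high - q_low) * 0.1

# e.g. _iqr_lower_bound_demo([1, 2, 3, 4, 5]) -> 2.0 - 0.2 = 1.8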
| 118
| 1
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowercase__ : Dict , lowercase__ : Any , lowercase__ : Tuple , lowercase__ : Tuple="attention" ):
'''simple docstring'''
_lowerCAmelCase =params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
_lowerCAmelCase =params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
_lowerCAmelCase =params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
_lowerCAmelCase =params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def snake_case_ ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : List[Any]=False ):
'''simple docstring'''
if split_mlp_wi:
_lowerCAmelCase =params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
_lowerCAmelCase =params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
_lowerCAmelCase =(wi_a, wi_a)
else:
_lowerCAmelCase =params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
_lowerCAmelCase =params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def snake_case_ ( lowercase__ : int , lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : Any ):
'''simple docstring'''
return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def snake_case_ ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ):
'''simple docstring'''
_lowerCAmelCase =traverse_util.flatten_dict(variables["""target"""] )
_lowerCAmelCase ={"""/""".join(lowercase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_lowerCAmelCase ="""encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase__ )
_lowerCAmelCase =collections.OrderedDict()
# Shared embeddings.
_lowerCAmelCase =old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase =tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" )
_lowerCAmelCase =layer_norm
_lowerCAmelCase =k.T
_lowerCAmelCase =o.T
_lowerCAmelCase =q.T
_lowerCAmelCase =v.T
# Block i, layer 1 (MLP).
_lowerCAmelCase =tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase =tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ )
_lowerCAmelCase =layer_norm
if split_mlp_wi:
_lowerCAmelCase =wi[0].T
_lowerCAmelCase =wi[1].T
else:
_lowerCAmelCase =wi.T
_lowerCAmelCase =wo.T
_lowerCAmelCase =old[
"""encoder/relpos_bias/rel_embedding"""
].T
_lowerCAmelCase =old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase =tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" )
_lowerCAmelCase =layer_norm
_lowerCAmelCase =k.T
_lowerCAmelCase =o.T
_lowerCAmelCase =q.T
_lowerCAmelCase =v.T
# Block i, layer 1 (Cross Attention).
_lowerCAmelCase =tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" )
_lowerCAmelCase =layer_norm
_lowerCAmelCase =k.T
_lowerCAmelCase =o.T
_lowerCAmelCase =q.T
_lowerCAmelCase =v.T
# Block i, layer 2 (MLP).
_lowerCAmelCase =tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase =tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ )
_lowerCAmelCase =layer_norm
if split_mlp_wi:
_lowerCAmelCase =wi[0].T
_lowerCAmelCase =wi[1].T
else:
_lowerCAmelCase =wi.T
_lowerCAmelCase =wo.T
_lowerCAmelCase =old["""decoder/decoder_norm/scale"""]
_lowerCAmelCase =old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_lowerCAmelCase =old["""decoder/logits_dense/kernel"""].T
return new
def snake_case_ ( lowercase__ : Optional[int] , lowercase__ : bool ):
'''simple docstring'''
_lowerCAmelCase =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase =state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase =state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
_lowerCAmelCase =state_dict["""shared.weight"""]
return state_dict
def snake_case_ ( lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Tuple ):
'''simple docstring'''
_lowerCAmelCase =checkpoints.load_tax_checkpoint(lowercase__ )
_lowerCAmelCase =convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ )
_lowerCAmelCase =make_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ , strict=lowercase__ )
def snake_case_ ( lowercase__ : int , lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : bool = False ):
'''simple docstring'''
_lowerCAmelCase =TaConfig.from_json_file(lowercase__ )
print(f"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_lowerCAmelCase =TaEncoderModel(lowercase__ )
else:
_lowerCAmelCase =TaForConditionalGeneration(lowercase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(lowercase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase__ )
print("""Done""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''If passed, only the encoder of the model is converted.''', default=False
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
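# A self-contained sketch of the "flatten nested checkpoint params into
# '/'-joined keys" step performed at the top of the conversion (plain dicts
# here instead of a real T5X checkpoint; flax's traverse_util does the same
# for pytrees).
def _flatten_demo(nested, prefix=()):
    flat = {}
    for key, value in nested.items():
        path = prefix + (key,)
        if isinstance(value, dict):
            flat.update(_flatten_demo(value, path))
        else:
            flat["/".join(path)] = value
    return flat

# _flatten_demo({"encoder": {"layers_0": {"mlp": {"wi": 1}}}})
#   -> {"encoder/layers_0/mlp/wi": 1}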
| 149
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__SCREAMING_SNAKE_CASE : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(vocab, range(len(vocab))))
__SCREAMING_SNAKE_CASE : int = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Optional[Any] = Path(tmpdirname)
__SCREAMING_SNAKE_CASE : Any = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__SCREAMING_SNAKE_CASE : Optional[Any] = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__SCREAMING_SNAKE_CASE : Tuple = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__SCREAMING_SNAKE_CASE : List[Any] = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__SCREAMING_SNAKE_CASE : List[Any] = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
__SCREAMING_SNAKE_CASE : Tuple = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__SCREAMING_SNAKE_CASE : Optional[Any] = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
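# A brief sketch of reloading the artefact just saved (assumes the
# save_pretrained calls above succeeded; the hub id "stas/tiny-wmt19-en-ru"
# from the header comment would work the same way once uploaded).
def _reload_tiny_demo():
    model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
    tokenizer_reloaded = FSMTTokenizer.from_pretrained(mname_tiny)
    return tokenizer_reloaded(["Still a tiny model"], return_tensors="pt"), model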
| 149
| 1
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def lowercase__ ( snake_case_ :int = 1_500_000 ):
__UpperCAmelCase = defaultdict(snake_case_ )
__UpperCAmelCase = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , snake_case_ , 2 ):
if gcd(snake_case_ , snake_case_ ) > 1:
continue
__UpperCAmelCase = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(snake_case_ , limit + 1 , snake_case_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f"""{solution() = }""")
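# A quick check of the Euclid-formula identities the sieve above relies on:
# with a = m*m - n*n, b = 2*m*n, c = m*m + n*n the triple is right-angled and
# its perimeter is 2*m*(m + n), which is exactly the step the loop uses.
def _euclid_demo(m: int, n: int) -> tuple:
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert a * a + b * b == c * c
    assert a + b + c == 2 * m * (m + n)
    return a, b, c

# _euclid_demo(2, 1) -> (3, 4, 5), perimeter 12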
| 49
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
UpperCamelCase__ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
if weight_type is not None:
UpperCAmelCase__ = getattr(__A, __A ).shape
else:
UpperCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
elif weight_type == "running_mean":
UpperCAmelCase__ = value
elif weight_type == "running_var":
UpperCAmelCase__ = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ = value
elif weight_type == "inv_freq":
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
__A, __A, __A, __A, hf_model.config.feat_extract_norm == "group", )
UpperCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
UpperCAmelCase__ = name.split(__A )[0].split("." )[-2]
UpperCAmelCase__ = mapped_key.replace("*", __A )
if "pos_bias_u" in name:
UpperCAmelCase__ = None
elif "pos_bias_v" in name:
UpperCAmelCase__ = None
elif "weight_g" in name:
UpperCAmelCase__ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ = "weight_v"
elif "bias" in name:
UpperCAmelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = "weight"
elif "running_mean" in name:
UpperCAmelCase__ = "running_mean"
elif "inv_freq" in name:
UpperCAmelCase__ = "inv_freq"
elif "running_var" in name:
UpperCAmelCase__ = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase__ = "num_batches_tracked"
else:
UpperCAmelCase__ = None
set_recursively(__A, __A, __A, __A, __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ = name.split("." )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
@torch.no_grad()
def lowerCAmelCase_ ( __A, __A, __A=None, __A=None, __A=True ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ = WavaVecaConformerConfig.from_pretrained(__A, hidden_act="swish" )
else:
UpperCAmelCase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCAmelCase__ = "rotary"
if is_finetuned:
if dict_path:
UpperCAmelCase__ = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ = target_dict.pad_index
UpperCAmelCase__ = target_dict.bos_index
UpperCAmelCase__ = target_dict.eos_index
UpperCAmelCase__ = len(target_dict.symbols )
UpperCAmelCase__ = os.path.join(__A, "vocab.json" )
if not os.path.isdir(__A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__A ) )
return
os.makedirs(__A, exist_ok=__A )
UpperCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
with open(__A, "w", encoding="utf-8" ) as vocab_handle:
json.dump(__A, __A )
UpperCAmelCase__ = WavaVecaCTCTokenizer(
__A, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=__A, )
UpperCAmelCase__ = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=__A, return_attention_mask=__A, )
UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=__A, tokenizer=__A )
processor.save_pretrained(__A )
UpperCAmelCase__ = WavaVecaConformerForCTC(__A )
else:
UpperCAmelCase__ = WavaVecaConformerForPreTraining(__A )
if is_finetuned:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase__ = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase__ = fairseq.tasks.setup_task(__A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=__A )
UpperCAmelCase__ = model[0].eval()
recursively_load_weights(__A, __A, not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='If passed, the checkpoint to convert is a pretrained (not fine-tuned) model'
)
UpperCamelCase__ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
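# A minimal sketch of the "*" wildcard substitution that MAPPING above relies
# on: a layer index parsed out of the fairseq parameter name replaces "*" in
# the Hugging Face key. The index extraction below is simplified for
# illustration; the converter derives it from the matched key instead.
def _wildcard_demo(fairseq_name: str, mapped_key: str) -> str:
    layer_index = fairseq_name.split(".")[1]  # e.g. "encoder.3.ffn1.w_1" -> "3"
    return mapped_key.replace("*", layer_index)

# _wildcard_demo("encoder.3.ffn1.w_1", "encoder.layers.*.ffn1.intermediate_dense")
#   -> "encoder.layers.3.ffn1.intermediate_dense"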
| 486
| 0
|
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
def lowercase__ ( snake_case_ :nn.ModuleList , snake_case_ :nn.ModuleList , snake_case_ :List[int] ):
__UpperCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(snake_case_ ) == len(snake_case_ ), F'''{len(snake_case_ )} != {len(snake_case_ )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
_lowercase : str = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_lowercase : str = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Tuple ):
try:
__UpperCAmelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''' )
return list(range(snake_case_ ) )
def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :List[str] ):
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(snake_case_ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowercase__ ( snake_case_ :Union[str, PreTrainedModel] , snake_case_ :Union[str, Path] = "student" , snake_case_ :Union[int, None] = None , snake_case_ :Union[int, None] = None , snake_case_ :List[Any]=False , snake_case_ :Optional[int]=None , snake_case_ :List[str]=None , **snake_case_ :List[str] , ):
__UpperCAmelCase = '''encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(snake_case_ , snake_case_ ):
AutoTokenizer.from_pretrained(snake_case_ ).save_pretrained(snake_case_ ) # purely for convenience
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).eval()
else:
assert isinstance(snake_case_ , snake_case_ ), F'''teacher must be a model or string got type {type(snake_case_ )}'''
__UpperCAmelCase = teacher.config.to_diff_dict()
try:
__UpperCAmelCase , __UpperCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__UpperCAmelCase = teacher_e
if d is None:
__UpperCAmelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__UpperCAmelCase , __UpperCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__UpperCAmelCase , __UpperCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__UpperCAmelCase = teacher_e
if d is None:
__UpperCAmelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(snake_case_ )
# Copy weights
__UpperCAmelCase = teacher.config_class(**snake_case_ )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(snake_case_ )
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
__UpperCAmelCase = student.load_state_dict(teacher.state_dict() , strict=snake_case_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__UpperCAmelCase , __UpperCAmelCase = list(range(snake_case_ ) ), list(range(snake_case_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(snake_case_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__UpperCAmelCase = pick_layers_to_copy(snake_case_ , snake_case_ )
if d_layers_to_copy is None:
__UpperCAmelCase = pick_layers_to_copy(snake_case_ , snake_case_ )
try:
if hasattr(
snake_case_ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , snake_case_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , snake_case_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , snake_case_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , snake_case_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , snake_case_ )
copy_layers(teacher.decoder.block , student.decoder.block , snake_case_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
__UpperCAmelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(snake_case_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
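# A compact sketch of the layer-copying primitive this script is built on:
# selected teacher sub-modules are loaded, state-dict-wise, into a smaller
# student ModuleList (toy Linear layers here rather than a real seq2seq model).
def _copy_layers_demo():
    teacher = nn.ModuleList([nn.Linear(4, 4) for _ in range(6)])
    student = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])
    picked = nn.ModuleList([teacher[i] for i in (0, 2, 5)])  # keep first and last
    student.load_state_dict(picked.state_dict())
    return student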
| 397
| 1
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCamelCase__ ( _lowerCamelCase : str = "laptop" ) -> DataFrame:
lowerCamelCase_ = F'''https://www.amazon.in/laptop/s?k={product}'''
lowerCamelCase_ = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
lowerCamelCase_ = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text , 'html.parser' )
# Initialize a Pandas dataframe with the column titles
lowerCamelCase_ = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
lowerCamelCase_ = item.ha.text
lowerCamelCase_ = 'https://www.amazon.in/' + item.ha.a['href']
lowerCamelCase_ = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
lowerCamelCase_ = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
lowerCamelCase_ = 'Not available'
try:
lowerCamelCase_ = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
lowerCamelCase_ = ''
try:
lowerCamelCase_ = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
lowerCamelCase_ = float('nan' )
except AttributeError:
pass
lowerCamelCase_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCamelCase_ = ' '
lowerCamelCase_ = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = '''headphones'''
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
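# The discount computed in the loop above, written out plainly with toy
# numbers: discount% = (MRP - price) / MRP * 100.
def _discount_demo(mrp: float, price: float) -> float:
    return (mrp - price) / mrp * 100

# _discount_demo(1000.0, 750.0) -> 25.0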
| 549
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_SCREAMING_SNAKE_CASE : List[str] = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_SCREAMING_SNAKE_CASE : int = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_SCREAMING_SNAKE_CASE : Dict = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCamelCase ( self : Dict ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=False ) -> Tuple:
if return_pvalue:
lowerCamelCase_ = pearsonr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0] )}
| 549
| 1
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase__ = 1_6
UpperCamelCase__ = 3_2
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Optional[Any]:
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ : List[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase__ : Dict = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ : Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ : Union[str, Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ : Any = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ : Optional[Any] = 8
else:
UpperCAmelCase__ : int = None
return tokenizer.pad(
lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase__ : List[str] = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
UpperCAmelCase__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase__ = mocked_dataloaders # noqa: F811
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCAmelCase__ ) == "1":
UpperCAmelCase__ : List[Any] = 2
# Initialize accelerator
UpperCAmelCase__ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ : str = config['''lr''']
UpperCAmelCase__ : Dict = int(config['''num_epochs'''] )
UpperCAmelCase__ : Optional[Any] = int(config['''seed'''] )
UpperCAmelCase__ : Optional[int] = int(config['''batch_size'''] )
UpperCAmelCase__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCAmelCase__ )
def inner_training_loop(lowerCAmelCase__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ : Dict = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate scheduler
UpperCAmelCase__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase__ : Optional[Any] = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = outputs.loss
accelerator.backward(lowerCAmelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
UpperCAmelCase__ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
UpperCAmelCase__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ) -> Any:
UpperCAmelCase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCAmelCase__ : str = parser.parse_args()
UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
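# A stripped-down sketch of the decorator pattern shown above: the wrapped
# function must take the batch size as its first argument, and on a CUDA
# out-of-memory error `find_executable_batch_size` halves the batch size and
# retries. The body below is a toy placeholder; a real loop would rebuild its
# dataloaders from `batch_size` exactly as `inner_training_loop` does.
@find_executable_batch_size(starting_batch_size=128)
def _batch_size_demo(batch_size):
    print(f"""attempting batch_size={batch_size}""")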
| 312
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'wavlm'
def __init__( self : Optional[Any] , _A : Union[str, Any]=32 , _A : Any=768 , _A : Dict=12 , _A : Optional[Any]=12 , _A : Optional[Any]=3_072 , _A : Any="gelu" , _A : Union[str, Any]=0.1 , _A : Any=0.1 , _A : Optional[Any]=0.1 , _A : Dict=0.0 , _A : Tuple=0.1 , _A : str=0.1 , _A : Union[str, Any]=0.0_2 , _A : Optional[Any]=1e-5 , _A : str="group" , _A : int="gelu" , _A : Tuple=(512, 512, 512, 512, 512, 512, 512) , _A : Tuple=(5, 2, 2, 2, 2, 2, 2) , _A : int=(10, 3, 3, 3, 3, 2, 2) , _A : Optional[int]=False , _A : str=128 , _A : str=16 , _A : Optional[int]=320 , _A : Any=800 , _A : Any=False , _A : Tuple=True , _A : Optional[Any]=0.0_5 , _A : str=10 , _A : int=2 , _A : Optional[int]=0.0 , _A : int=10 , _A : List[str]=320 , _A : Tuple=2 , _A : Dict=0.1 , _A : Union[str, Any]=100 , _A : Tuple=256 , _A : Dict=256 , _A : List[str]=0.1 , _A : str="mean" , _A : Optional[int]=False , _A : Optional[Any]=False , _A : Any=256 , _A : Union[str, Any]=(512, 512, 512, 512, 1_500) , _A : str=(5, 3, 3, 1, 1) , _A : Union[str, Any]=(1, 2, 3, 1, 1) , _A : str=512 , _A : Optional[int]=80 , _A : List[Any]=0 , _A : Optional[int]=1 , _A : List[str]=2 , _A : Optional[int]=False , _A : str=3 , _A : Dict=2 , _A : List[str]=3 , _A : Optional[Any]=None , **_A : Tuple , ):
'''simple docstring'''
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : Optional[Any] = feat_extract_norm
UpperCAmelCase__ : str = feat_extract_activation
UpperCAmelCase__ : Tuple = list(_A )
UpperCAmelCase__ : Union[str, Any] = list(_A )
UpperCAmelCase__ : Optional[Any] = list(_A )
UpperCAmelCase__ : Optional[Any] = conv_bias
UpperCAmelCase__ : List[Any] = num_buckets
UpperCAmelCase__ : Optional[Any] = max_bucket_distance
UpperCAmelCase__ : int = num_conv_pos_embeddings
UpperCAmelCase__ : Optional[Any] = num_conv_pos_embedding_groups
UpperCAmelCase__ : Any = len(self.conv_dim )
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[Any] = hidden_dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : str = activation_dropout
UpperCAmelCase__ : str = feat_proj_dropout
UpperCAmelCase__ : Tuple = final_dropout
UpperCAmelCase__ : List[str] = layerdrop
UpperCAmelCase__ : int = layer_norm_eps
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : Any = num_ctc_classes
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Optional[int] = do_stable_layer_norm
UpperCAmelCase__ : Union[str, Any] = use_weighted_layer_sum
UpperCAmelCase__ : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ : List[str] = apply_spec_augment
UpperCAmelCase__ : str = mask_time_prob
UpperCAmelCase__ : int = mask_time_length
UpperCAmelCase__ : Optional[int] = mask_time_min_masks
UpperCAmelCase__ : int = mask_feature_prob
UpperCAmelCase__ : Optional[int] = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCAmelCase__ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase__ : List[str] = num_codevector_groups
UpperCAmelCase__ : Optional[int] = contrastive_logits_temperature
UpperCAmelCase__ : Optional[int] = num_negatives
UpperCAmelCase__ : List[Any] = codevector_dim
UpperCAmelCase__ : Union[str, Any] = proj_codevector_dim
UpperCAmelCase__ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase__ : str = ctc_loss_reduction
UpperCAmelCase__ : Optional[Any] = ctc_zero_infinity
# adapter
UpperCAmelCase__ : Union[str, Any] = add_adapter
UpperCAmelCase__ : List[Any] = adapter_kernel_size
UpperCAmelCase__ : Union[str, Any] = adapter_stride
UpperCAmelCase__ : Tuple = num_adapter_layers
UpperCAmelCase__ : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase__ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase__ : Optional[int] = list(_A )
UpperCAmelCase__ : str = list(_A )
UpperCAmelCase__ : Any = list(_A )
UpperCAmelCase__ : List[Any] = xvector_output_dim
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
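# Editor's note (sketch): the property above is the product of the conv strides,
# i.e. the total downsampling factor of the feature encoder. With the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2) this is 5 * 2**6 == 320, so one output
# frame corresponds to 320 waveform samples (~20 ms of 16 kHz audio).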
| 312
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = 'funnel'
lowerCamelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self : Union[str, Any] , snake_case_ : List[Any]=3_0522 , snake_case_ : List[str]=[4, 4, 4] , snake_case_ : int=None , snake_case_ : Optional[int]=2 , snake_case_ : Union[str, Any]=768 , snake_case_ : Union[str, Any]=12 , snake_case_ : List[Any]=64 , snake_case_ : List[str]=3072 , snake_case_ : List[Any]="gelu_new" , snake_case_ : List[str]=0.1 , snake_case_ : Any=0.1 , snake_case_ : str=0.0 , snake_case_ : Any=0.1 , snake_case_ : List[Any]=None , snake_case_ : Optional[int]=1E-9 , snake_case_ : str="mean" , snake_case_ : Tuple="relative_shift" , snake_case_ : Optional[Any]=True , snake_case_ : List[str]=True , snake_case_ : int=True , **snake_case_ : Optional[int] , ):
"""simple docstring"""
A : List[Any] = vocab_size
A : int = block_sizes
A : Dict = [1] * len(snake_case_ ) if block_repeats is None else block_repeats
assert len(snake_case_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
A : Any = num_decoder_layers
A : Any = d_model
A : Union[str, Any] = n_head
A : Any = d_head
A : List[Any] = d_inner
A : str = hidden_act
A : Tuple = hidden_dropout
A : int = attention_dropout
A : Union[str, Any] = activation_dropout
A : Dict = initializer_range
A : Any = initializer_std
A : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
A : Union[str, Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
A : str = attention_type
A : Optional[int] = separate_cls
A : Dict = truncate_seq
A : Any = pool_q_only
super().__init__(**snake_case_ )
@property
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def _UpperCAmelCase ( self : Optional[int] , snake_case_ : List[Any] ):
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def _UpperCAmelCase ( self : Optional[int] , snake_case_ : Tuple ):
"""simple docstring"""
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
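# Editor's usage sketch (assuming this class is exported as
# `transformers.FunnelConfig`): the depth attributes are derived, not set
# directly -- the hidden-layer count is the sum of `block_sizes` and the block
# count is their length:
#     from transformers import FunnelConfig
#     config = FunnelConfig(block_sizes=[4, 4, 4])
#     assert config.num_hidden_layers == 12 and config.num_blocks == 3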
| 256
|
def _lowerCamelCase ( lowerCamelCase_: int ):
'''simple docstring'''
A : Any = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCamelCase ( lowerCamelCase_: int = 100 ):
'''simple docstring'''
A : Dict = 1
A : Union[str, Any] = 2
for i in range(2 , max_n + 1 ):
A : List[Any] = pre_numerator
A : Tuple = 2 * i // 3 if i % 3 == 0 else 1
A : str = cur_numerator
A : str = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F'''{solution() = }''')
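# Editor's note (sketch): this solves Project Euler 65. The continued fraction
# of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...], so every third partial quotient
# is 2 * i // 3 and the rest are 1; the loop builds the numerator of the i-th
# convergent, and the answer is the digit sum of the 100th numerator (272).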
| 256
| 1
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( a : Optional[Any] ) ->str:
snake_case = torch.exp(a )
snake_case = torch.sum(a , dim=1 ) # sum of exp(x_i)
snake_case = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(a ) - B / A
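# Editor's note (sketch): the function above computes the Shannon entropy of
# softmax(x) row-wise,
#     H = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i)),
# which is algebraically equal to -sum_i p_i * log(p_i) for p = softmax(x).
# A quick self-check of that identity:
#     logits = torch.tensor([[1.0, 2.0, 3.0]])
#     p = torch.softmax(logits, dim=1)
#     # the entropy function above matches -(p * p.log()).sum(dim=1)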
class _lowercase ( nn.Module ):
def __init__( self , A__ ) -> Any:
super().__init__()
snake_case = config.output_attentions
snake_case = config.output_hidden_states
snake_case = nn.ModuleList([BertLayer(A__ ) for _ in range(config.num_hidden_layers )] )
snake_case = nn.ModuleList([BertHighway(A__ ) for _ in range(config.num_hidden_layers )] )
snake_case = [-1 for _ in range(config.num_hidden_layers )]
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
if (type(A__ ) is float) or (type(A__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case = x
else:
snake_case = x
def UpperCamelCase ( self , A__ ) -> Optional[Any]:
snake_case = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCamelCase ( self , A__ , A__=None , A__=None , A__=None , A__=None , ) -> int:
snake_case = ()
snake_case = ()
snake_case = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case = all_hidden_states + (hidden_states,)
snake_case = layer_module(
A__ , A__ , head_mask[i] , A__ , A__ )
snake_case = layer_outputs[0]
if self.output_attentions:
snake_case = all_attentions + (layer_outputs[1],)
snake_case = (hidden_states,)
if self.output_hidden_states:
snake_case = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case = current_outputs + (all_attentions,)
snake_case = self.highway[i](A__ )
# logits, pooled_output
if not self.training:
snake_case = highway_exit[0]
snake_case = entropy(A__ )
snake_case = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(A__ , i + 1 )
else:
snake_case = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case = all_hidden_states + (hidden_states,)
snake_case = (hidden_states,)
if self.output_hidden_states:
snake_case = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case = outputs + (all_attentions,)
snake_case = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , __a , )
class _lowercase ( __a ):
def __init__( self , A__ ) -> str:
super().__init__(A__ )
snake_case = config
snake_case = BertEmbeddings(A__ )
snake_case = DeeBertEncoder(A__ )
snake_case = BertPooler(A__ )
self.init_weights()
def UpperCamelCase ( self ) -> Dict:
self.encoder.init_highway_pooler(self.pooler )
def UpperCamelCase ( self ) -> str:
return self.embeddings.word_embeddings
def UpperCamelCase ( self , A__ ) -> Any:
snake_case = value
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(A__ )
@add_start_docstrings_to_model_forward(A__ )
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> str:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
snake_case = input_ids.size()
elif inputs_embeds is not None:
snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case = torch.ones(A__ , device=A__ )
if encoder_attention_mask is None:
snake_case = torch.ones(A__ , device=A__ )
if token_type_ids is None:
snake_case = torch.zeros(A__ , dtype=torch.long , device=A__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case = self.get_extended_attention_mask(A__ , A__ , A__ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case = encoder_attention_mask[:, None, None, :]
snake_case = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
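        # (Additive mask convention: positions to keep contribute 0.0 and masked
        # positions contribute -10000.0, which drives their softmax weight to ~0
        # once added to the attention scores.)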
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case = self.get_head_mask(A__ , self.config.num_hidden_layers )
snake_case = self.embeddings(
input_ids=A__ , position_ids=A__ , token_type_ids=A__ , inputs_embeds=A__ )
snake_case = self.encoder(
A__ , attention_mask=A__ , head_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
snake_case = encoder_outputs[0]
snake_case = self.pooler(A__ )
snake_case = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowercase ( __a ):
def __init__( self , A__ , A__ ) -> Any:
snake_case = message
snake_case = exit_layer # start from 1!
class _lowercase ( nn.Module ):
def __init__( self , A__ ) -> str:
super().__init__()
snake_case = BertPooler(A__ )
snake_case = nn.Dropout(config.hidden_dropout_prob )
snake_case = nn.Linear(config.hidden_size , config.num_labels )
def UpperCamelCase ( self , A__ ) -> Optional[Any]:
# Pooler
snake_case = encoder_outputs[0]
snake_case = self.pooler(A__ )
# "return" pooler_output
# BertModel
snake_case = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case = bmodel_output[1]
snake_case = self.dropout(A__ )
snake_case = self.classifier(A__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , __a , )
class _lowercase ( __a ):
def __init__( self , A__ ) -> Union[str, Any]:
super().__init__(A__ )
snake_case = config.num_labels
snake_case = config.num_hidden_layers
snake_case = DeeBertModel(A__ )
snake_case = nn.Dropout(config.hidden_dropout_prob )
snake_case = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(A__ )
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=-1 , A__=False , ) -> Tuple:
snake_case = self.num_layers
try:
snake_case = self.bert(
A__ , attention_mask=A__ , token_type_ids=A__ , position_ids=A__ , head_mask=A__ , inputs_embeds=A__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case = outputs[1]
snake_case = self.dropout(A__ )
snake_case = self.classifier(A__ )
snake_case = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case = e.message
snake_case = e.exit_layer
snake_case = outputs[0]
if not self.training:
snake_case = entropy(A__ )
snake_case = []
snake_case = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case = MSELoss()
snake_case = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case = CrossEntropyLoss()
snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case = []
for highway_exit in outputs[-1]:
snake_case = highway_exit[0]
if not self.training:
highway_logits_all.append(A__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case = MSELoss()
snake_case = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case = CrossEntropyLoss()
snake_case = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(A__ )
if train_highway:
snake_case = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case = (loss,) + outputs
if not self.training:
snake_case = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 711
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
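# Editor's usage sketch (checkpoint name taken from the diffusers docs; treat it
# as an assumption): with transformers, torch and note-seq installed,
#     from diffusers import MidiProcessor, SpectrogramDiffusionPipeline
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(MidiProcessor()("some_file.mid"))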
| 44
| 0
|
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowercase = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
lowercase = "hopper-medium-v2"
lowercase = gym.make(env_name)
lowercase = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
lowercase = env.reset()
lowercase = 0
lowercase = 0
lowercase = 1000
lowercase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowercase = pipeline(obs, planning_horizon=32)
# execute action in environment
lowercase , lowercase , lowercase , lowercase = env.step(denorm_actions)
lowercase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
lowercase = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
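# Editor's note (sketch): each `pipeline(obs, planning_horizon=32)` call samples
# a batch of candidate trajectories with the diffusion model, nudges them toward
# higher value for `n_guide_steps` gradient steps, and returns the denormalized
# first action of the best plan -- which is what `env.step` consumes above.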
| 198
|
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int = 2_00 ):
lowercase_ : str = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
lowercase_ : Dict = [0] * (pence + 1)
lowercase_ : List[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
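# Editor's note (sketch): this is the classic coin-change counting DP (Project
# Euler 31). Iterating coins in the outer loop counts each multiset of coins
# exactly once, independent of order; e.g. solution(5) == 4 via {5}, {2+2+1},
# {2+1+1+1} and {1+1+1+1+1}.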
| 425
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowerCamelCase_ : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
lowerCamelCase_ : int = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
lowerCamelCase_ : str = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score (one per example).
    references: list of reference labels or values (one per example).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __magic_name__( _A , _A ):
'''simple docstring'''
return float((preds == labels).mean() )
def __magic_name__( _A , _A ):
'''simple docstring'''
UpperCamelCase__ = simple_accuracy(_A , _A )
UpperCamelCase__ = float(fa_score(y_true=_A , y_pred=_A ) )
return {
"accuracy": acc,
"f1": fa,
}
def __magic_name__( _A , _A ):
'''simple docstring'''
UpperCamelCase__ = float(pearsonr(_A , _A )[0] )
UpperCamelCase__ = float(spearmanr(_A , _A )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def A ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def A ( self : List[str] , lowercase : Dict , lowercase : Optional[Any] ) -> List[str]:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 704
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : Dict = (DPMSolverSinglestepScheduler,)
__a : List[Any] = (("num_inference_steps", 25),)
def A ( self : List[Any] , **lowercase : Any ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**lowercase )
return config
def A ( self : List[Any] , lowercase : Tuple=0 , **lowercase : Optional[Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop("""num_inference_steps""" , lowercase )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config(**lowercase )
UpperCamelCase__ = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCamelCase__ = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase__ , UpperCamelCase__ = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
UpperCamelCase__ = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCamelCase__ = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A ( self : List[str] ) -> Any:
'''simple docstring'''
pass
def A ( self : Any , lowercase : List[Any]=0 , **lowercase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop("""num_inference_steps""" , lowercase )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCamelCase__ = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase__ = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCamelCase__ = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A ( self : Union[str, Any] , lowercase : Any=None , **lowercase : List[Any] ) -> Dict:
'''simple docstring'''
if scheduler is None:
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(**lowercase )
UpperCamelCase__ = scheduler_class(**lowercase )
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(**lowercase )
UpperCamelCase__ = scheduler_class(**lowercase )
UpperCamelCase__ = 1_0
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ = model(lowercase , lowercase )
UpperCamelCase__ = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def A ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
UpperCamelCase__ = 5_0
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
UpperCamelCase__ = model(lowercase , lowercase )
UpperCamelCase__ = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
UpperCamelCase__ = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase )
def A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
UpperCamelCase__ = self.full_loop(scheduler=lowercase )
UpperCamelCase__ = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
UpperCamelCase__ = DEISMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCamelCase__ = self.full_loop(scheduler=lowercase )
UpperCamelCase__ = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def A ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=lowercase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , algorithm_type="""dpmsolver++""" , solver_order=lowercase , solver_type=lowercase , )
def A ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def A ( self : Optional[int] ) -> int:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
UpperCamelCase__ = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"
def A ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(lower_order_final=lowercase )
self.check_over_configs(lower_order_final=lowercase )
def A ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def A ( self : List[Any] ) -> int:
'''simple docstring'''
self.check_over_configs(variance_type=lowercase )
self.check_over_configs(variance_type="""learned_range""" )
def A ( self : Tuple ) -> int:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0 )
def A ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = self.full_loop()
UpperCamelCase__ = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def A ( self : str ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.full_loop(use_karras_sigmas=lowercase )
UpperCamelCase__ = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3
def A ( self : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.full_loop(prediction_type="""v_prediction""" )
UpperCamelCase__ = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3
def A ( self : int ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=lowercase )
UpperCamelCase__ = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3
def A ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
UpperCamelCase__ = scheduler_class(**lowercase )
UpperCamelCase__ = 1_0
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ = model(lowercase , lowercase )
UpperCamelCase__ = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
assert sample.dtype == torch.floataa
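# Editor's usage sketch (the pipeline checkpoint is an example, not from this
# file): outside of tests, the scheduler is usually swapped into a pipeline via
# its config:
#     from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)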
| 265
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[str] = '''timm_backbone'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = backbone
snake_case : Dict = num_channels
snake_case : Optional[int] = features_only
snake_case : Any = use_pretrained_backbone
snake_case : Union[str, Any] = True
snake_case : Optional[int] = out_indices if out_indices is not None else (-1,)
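# Editor's note: the bare `True` assignment above likely corresponds to
# upstream's `use_timm_backbone = True` (this config always routes through
# timm), and `out_indices` defaults to (-1,), i.e. only the final feature map
# is exposed.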
| 36
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
snake_case__ : Optional[int] = item.ha.text
snake_case__ : Any = """https://www.amazon.in/""" + item.ha.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
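# Editor's note (sketch): the scraper above is tightly coupled to Amazon's
# current HTML -- the `s-result-item` / `a-offscreen` class names are brittle,
# and requests are likely to be blocked without the browser-like User-Agent
# header; expect to update the selectors whenever the markup changes.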
| 38
| 0
|
import numpy as np
from PIL import Image
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = np.array(_lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Any = 0
# compute the shape of the output matrix
UpperCAmelCase_ : Dict = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCAmelCase_ : Union[str, Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCAmelCase_ : str = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : str = 0
return updated_arr
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = np.array(_lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Any = 0
# compute the shape of the output matrix
UpperCAmelCase_ : Union[str, Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCAmelCase_ : Tuple = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCAmelCase_ : Union[str, Any] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : int = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__a = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
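# Editor's worked example (sketch): for the 4x4 matrix [[1..4], [5..8], [9..12],
# [13..16]] with size=2 and stride=2, maxpooling returns [[6, 8], [14, 16]] and
# avgpooling (with its int() truncation) returns [[3, 5], [11, 13]].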
| 702
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
UpperCAmelCase_ : int = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase_ : List[str] = model.generate(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase_ : Optional[Any] = model_reloaded.generate(**_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Dict = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase_ : Any = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = model.reverse_bettertransformer()
model.save_pretrained(_SCREAMING_SNAKE_CASE )
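# Editor's note: the second test pins down the intended contract -- a model
# converted with `to_bettertransformer()` must raise on `save_pretrained` until
# `reverse_bettertransformer()` restores the vanilla modules, so checkpoints on
# disk never contain fused BetterTransformer layers.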
| 300
| 0
|
_SCREAMING_SNAKE_CASE : Union[str, Any] = 8.3144598
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
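# Editor's note: this is the Maxwell-Boltzmann RMS speed, v_rms = sqrt(3*R*T/M),
# with R = 8.3144598 J/(mol*K), T in kelvin and M in kg/mol -- e.g. N2
# (0.028 kg/mol) at 300 K gives roughly 517 m/s.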
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_SCREAMING_SNAKE_CASE : List[str] = 3_00
    _SCREAMING_SNAKE_CASE : Tuple = 0.028  # molar mass of N2 in kg/mol, matching the function's units
_SCREAMING_SNAKE_CASE : List[Any] = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 550
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 'deit'
def __init__( self , __snake_case=7_6_8 , __snake_case=1_2 , __snake_case=1_2 , __snake_case=3_0_7_2 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1E-12 , __snake_case=2_2_4 , __snake_case=1_6 , __snake_case=3 , __snake_case=True , __snake_case=1_6 , **__snake_case , ):
super().__init__(**__snake_case )
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = qkv_bias
snake_case = encoder_stride
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = version.parse('1.11' )
@property
def a_ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a_ ( self ):
return 1E-4
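# Editor's usage sketch (assuming these classes are transformers' DeiTConfig and
# DeiTOnnxConfig): the ONNX config above declares a single dynamic-shaped
# `pixel_values` input and a 1e-4 fp32 validation tolerance:
#     from transformers import DeiTConfig
#     from transformers.models.deit.configuration_deit import DeiTOnnxConfig
#     onnx_config = DeiTOnnxConfig(DeiTConfig())
#     list(onnx_config.inputs)  # ['pixel_values']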
| 550
| 1
|
A = 'Tobias Carryer'
from time import time
class __a :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=int(time() ) ): # noqa: B008
SCREAMING_SNAKE_CASE_ : List[str] = multiplier
SCREAMING_SNAKE_CASE_ : Dict = increment
SCREAMING_SNAKE_CASE_ : Optional[Any] = modulo
SCREAMING_SNAKE_CASE_ : List[Any] = seed
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
A = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
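# Editor's note: the constants used above -- a = 1664525, c = 1013904223 and
# m = 2 << 31 (= 2**32) -- are the classic "Numerical Recipes" LCG parameters;
# the demo loop prints forever until interrupted.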
| 715
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def _lowerCamelCase( lowerCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if "cls_token" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('cls_token' , 'vit.embeddings.cls_token' )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('mask_token' , 'decoder.mask_token' )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('patch_embed.norm' , 'vit.embeddings.norm' )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('decoder_blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('blocks' , 'vit.encoder.layer' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('norm.weight' , 'vit.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('norm.bias' , 'vit.layernorm.bias' )
return name
def _lowerCamelCase( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ : Tuple = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
SCREAMING_SNAKE_CASE_ : Optional[int] = key.split('.' )
SCREAMING_SNAKE_CASE_ : List[Any] = int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_ : Any = config.decoder_hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'decoder.decoder_layers.'
if "weight" in key:
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[:dim, :]
SCREAMING_SNAKE_CASE_ : List[str] = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_ : Any = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_ : str = val[:dim]
SCREAMING_SNAKE_CASE_ : List[Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ : List[Any] = val[-dim:]
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config.hidden_size
SCREAMING_SNAKE_CASE_ : str = 'vit.encoder.layer.'
if "weight" in key:
SCREAMING_SNAKE_CASE_ : Any = val[:dim, :]
SCREAMING_SNAKE_CASE_ : Tuple = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_ : int = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_ : List[Any] = val[:dim]
SCREAMING_SNAKE_CASE_ : Dict = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ : str = val[-dim:]
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = val
return orig_state_dict
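# Editor's note: the split above unpacks the fused qkv projection of the
# original MAE checkpoint -- the first `dim` rows become query weights, the
# middle `dim` keys and the last `dim` values, matching the separate q/k/v
# linear layers in the HF ViT attention modules.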
def _lowerCamelCase( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Any = 1024
SCREAMING_SNAKE_CASE_ : Any = 4096
SCREAMING_SNAKE_CASE_ : List[str] = 24
SCREAMING_SNAKE_CASE_ : List[str] = 16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : str = 14
SCREAMING_SNAKE_CASE_ : List[str] = 1280
SCREAMING_SNAKE_CASE_ : int = 5120
SCREAMING_SNAKE_CASE_ : Optional[int] = 32
SCREAMING_SNAKE_CASE_ : List[str] = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = ViTMAEForPreTraining(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='cpu' )['model']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_ : int = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
SCREAMING_SNAKE_CASE_ : Dict = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_ : Any = model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_ : int = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 97
| 0
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def a_ ( lowerCAmelCase_ : str=None, lowerCAmelCase_ : Any=None ):
return field(default_factory=lambda: default, metadata=lowerCAmelCase_ )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
metadata={"""help""": """The csv file to plot."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
a_ = field(
        default=False, metadata={"help": "Disable logarithmic scale when plotting"}
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."}
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string_value):
    try:
        int(string_value)
        return True
    except ValueError:
        return False


def can_convert_to_float(string_value):
    try:
        float(string_value)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, np.int32)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    # PlotArguments is assumed to be the dataclass whose fields appear above.
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
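# A minimal usage sketch (the script filename is assumed; flag names follow the
# dataclass fields above, and the CSV is expected to provide `model`,
# `batch_size`, `sequence_length` and `result` columns, as read in Plot.__init__):
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png
#
# Sequence length is on the x-axis by default; pass --plot_along_batch to plot
# against batch size instead.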
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
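    # Shape bookkeeping for the test above: post_process_masks receives
    # low-resolution (1, 3, 5, 5) masks together with the original image size
    # (1764, 2646) and the reshaped model-input size (683, 1024), and
    # interpolates the masks back to the original resolution, hence the
    # expected (1, 3, 1764, 2646) output shape.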
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
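# A minimal usage sketch of the processor exercised above (the checkpoint name
# is an example; any SAM checkpoint such as "facebook/sam-vit-base" should work):
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(images=image, return_tensors="pt")
#   masks = processor.post_process_masks(
#       low_res_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
#   )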
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Helps in renaming the patch embedding layer weights for stage `idx`."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """Helps in renaming the attention block weights for stage `idx`, block `cnt`."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Helps in renaming the cls_token weights."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    """Helps in renaming the final classification layer weights."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """
    Converts an original CvT checkpoint into the Hugging Face format.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
    parser.add_argument(
        """--cvt_file_name""",
        default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
        type=str,
        help="""Path to the original CvT checkpoint (.pth) file.""",
    )
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
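# Example invocation (the script filename and checkpoint path are placeholders):
#
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24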
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
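# Worked example for the masks built above, assuming pad_token_id == 1:
# input_ids [[5, 6, 1]] -> attention_mask [[1, 1, 0]]. The decoder mask keeps
# position 0 unmasked unconditionally and applies the same pad comparison to
# the remaining positions.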
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in the 1000-digit number n that have the
    greatest product and return that product.
    """
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
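# Equivalent stdlib cross-check for the loop above (math.prod, Python >= 3.8):
#
#   from math import prod
#   assert solution() == max(
#       prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12)
#   )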
def catalan(number: int) -> int:
    """
    Return the `number`-th Catalan number (1, 1, 2, 5, 14, ... for number = 1, 2, 3, ...).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
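# The loop implements the recurrence C(i) = C(i - 1) * (4 * i - 2) // (i + 1),
# so (with the function name as restored above) catalan(1) through catalan(5)
# give 1, 1, 2, 5, 14.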
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method (class name assumed from the diffusers source)."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
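# Note: with double_z=True the encoder's conv_out emits 2 * out_channels feature
# maps; DiagonalGaussianDistribution further below chunks that tensor into mean
# and logvar halves along dim=1.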
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE; supports post-hoc remapping of
    codebook indices.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
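    # The `z + (z_q - z).detach()` assignment above is the straight-through
    # estimator: the forward pass returns the quantized z_q, while gradients
    # flow from z_q directly back to the encoder output z, bypassing the
    # non-differentiable argmin codebook lookup.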
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )
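    # Closed form used above: for diagonal Gaussians p = N(mu1, var1) and
    # q = N(mu2, var2), KL(p || q) = 0.5 * sum((mu1 - mu2)^2 / var2
    # + var1 / var2 - 1 - log(var1) + log(var2)); with other=None this reduces
    # to the KL against a standard normal N(0, I).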
    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100, action_weight=5, reward_weight=1, value_weight=1,
        block_size=249, action_dim=6, observation_dim=17, transition_dim=25,
        n_layer=4, n_head=4, n_embd=128,
        embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1,
        learning_rate=0.0006, max_position_embeddings=512,
        initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1,
        use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
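# A minimal usage sketch (all arguments are optional; defaults are shown in the
# signature above):
#
#   config = TrajectoryTransformerConfig(action_dim=6, observation_dim=17)
#   config.hidden_size  # 128, aliased to n_embd via attribute_map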
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the input values."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
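# Truth table exercised above: AND(0, 0) = 0, AND(0, 1) = 0, AND(1, 0) = 0,
# AND(1, 1) = 1. The tuple trick works because the result is 1 exactly when
# neither input is 0.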
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """
    A generic heap; usable as a min or max heap by choosing the key function
    accordingly.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None"""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap"""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison"""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of valid parent as per desired ordering among given index
        and both its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in upward direction of given index"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in downward direction of given index"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates given item value in heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes given item from heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts given item with given value in heap"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns top item tuple (Calculated value, item) from heap if present"""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """
        Returns top item tuple (Calculated value, item) from heap and removes it
        as well if present.
        """
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
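# A minimal usage sketch of the Heap class as restored above: the default
# ordering keeps the largest key on top, so negate the key for min-heap behavior.
#
#   h = Heap(key=lambda x: -x)  # min-heap over ints
#   for v in (5, 3, 8):
#       h.insert_item(v, v)
#   h.get_top()      # [3, -3]
#   h.extract_top()  # [3, -3]; item 5 becomes the new top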
"""simple docstring"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Find the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle with turtle graphics."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
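# Each call draws one triangle outline and recurses three times, so depth d
# draws 1 + 3 + 3**2 + ... + 3**d outlines in total; a depth of 4 or 5 already
# gives a recognizable Sierpinski gasket.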
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def get_pairs(word):
    """
    Return the set of symbol pairs occurring adjacently in a word, where the word
    is represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
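# Illustrative call (get_pairs expects a word as a tuple of symbols, as produced
# by the BPE routine below):
#   >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
#   [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]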
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string by splitting on whitespace and applying BPE."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
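# Minimal usage sketch (the file paths are hypothetical; a real vocab.json and
# merges.txt pair from a CTRL checkpoint is required):
#
#   tokenizer = CTRLTokenizer("vocab.json", "merges.txt")
#   tokens = tokenizer.tokenize("Links Hello world")   # BPE sub-word splits end in "@@"
#   ids = tokenizer.convert_tokens_to_ids(tokens)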
"""Convert TrOCR checkpoints from the unilm repository."""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight'))
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias'))
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight'))
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias'))
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight'))
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias'))
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight'))
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias'))
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight'))
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias'))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
])
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"  #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
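# Example invocation (a sketch; the script file name is hypothetical, the URL is
# the script's own default checkpoint):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten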
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>

<HEAD>
<TITLE>sample document</TITLE>
</HEAD>

<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""

    html_string_2 = """
<!DOCTYPE html>
<html>
<body>

<h1>My First Heading</h1>
<p>My first paragraph.</p>

</body>
</html>
"""

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
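# Hypothetical usage sketch: aligning the template with a dataset's features.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = AudioClassification(audio_column="audio", label_column="labels")
#   task = task.align_with_features(features)  # label_schema now carries the class names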
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """
    Counter-based check: a string can be rearranged into a palindrome
    iff at most one character has an odd count.
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """
    Manual frequency-count implementation of the same check.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
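# Both implementations encode the same invariant. For example (illustrative):
#   >>> can_string_be_rearranged_as_palindrome_counter("Momo")
#   True
#   >>> can_string_be_rearranged_as_palindrome("abcb")
#   False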
def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations on the given string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    """
    Search x in a sorted list arr using jump search.
    Returns the index of x if found, otherwise -1.
    """
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
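# Illustrative call (jump search requires a sorted input list):
#   >>> jump_search([0, 1, 2, 3, 5, 8, 13, 21], 8)
#   5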
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
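# For instance (illustrative): shave_segments("input_blocks.3.0.op.weight", 2)
# returns "0.op.weight", while a negative count keeps everything but the last
# segments, e.g. shave_segments("norm.weight", -1) returns "norm".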
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """
    Assigns locally converted weights to the new checkpoint, applying a global
    renaming and splitting fused attention layers where requested.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
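# Shape sketch of the qkv split above (the numbers are illustrative): a fused
# qkv weight of shape (3 * C, C, 1) yields channels = C, is reshaped per head,
# split along dim=1 into query/key/value chunks, and each chunk is flattened
# back to (C, C) via target_shape before being stored under its new key.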
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes the original LDM state dict and a config, and returns a converted
    checkpoint in the diffusers UNet2DModel naming scheme.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
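# Hypothetical instantiation sketch: the default config falls back to a Swin
# backbone and a DETR decoder, per the branches above.
#
#   config = MaskFormerConfig()
#   assert config.backbone_config.model_type == "swin"
#   assert config.decoder_config.model_type == "detr"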
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """
    Constructs a Bark processor which wraps a text tokenizer and optional Bark
    voice presets into a single processor.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, "
                    "no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json "
                    "dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist, "
                    f"no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero-shot object detection pipeline: detects objects matching free-text
    candidate labels in an image.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict of integer coordinates."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
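# Minimal usage sketch (the checkpoint name is illustrative; any zero-shot
# object detection model such as "google/owlvit-base-patch32" should work):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )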
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """
    Returns the sum of Euler's totient function phi(n) for 2 <= n <= limit,
    using a prime sieve followed by a totient sieve.
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
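# Hand-checkable sanity case (illustrative): for limit=10 the totients of
# 2..10 are 1, 2, 2, 4, 2, 6, 4, 6, 4, so solution(10) should return 31.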
if __name__ == "__main__":
print(f'{solution() = }')
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercises the basic LinkedList operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Tests that the LinkedList works with payloads of mixed types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """
    Applies a warmup schedule on a given learning rate decay schedule.
    """

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied directly to the variables."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with the WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to apply L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several mini-batches before an optimizer step."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()
    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
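# Illustrative usage (added; not part of the original module). A minimal sketch
# showing how the helpers above fit together; the layer sizes, step counts, and
# hyper-parameters below are arbitrary placeholder assumptions.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01
    )
    accumulator = GradientAccumulator()
    model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
    x = tf.random.normal((4, 3))
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(model(x) ** 2)  # dummy loss just for the sketch
    accumulator(tape.gradient(loss, model.trainable_variables))
    optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
    accumulator.reset()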
| 582
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the profile of the authenticated GitHub user."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 163
|
"""simple docstring"""
import socket
def main() -> None:
    """Receive a file from a server over a plain TCP socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 575
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dummy dataset of random length, used to exercise `IterableDatasetShard`.
    def __init__(self, p_stop: float = 0.01, max_length: int = 1000):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
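# Illustrative check (added; not from the original test file): two
# `BatchSamplerShard`s interleave the batches of a single `BatchSampler`
# round-robin, one batch per process.
if __name__ == "__main__":
    sampler = BatchSampler(range(12), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print(list(shards[0]))  # [[0, 1], [4, 5], [8, 9]]
    print(list(shards[1]))  # [[2, 3], [6, 7], [10, 11]]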
| 689
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered ways to reach `target` using elements of `array` (naive recursion)."""
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with memoisation over the remaining target."""
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed iteratively from 0 up to `target`."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
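# Worked example (added for illustration): with array = [1, 2, 5] and target = 5,
# the bottom-up table fills as dp_array = [1, 1, 2, 3, 5, 9], so all three
# functions return 9 ordered combinations (e.g. 5, 1+2+2, 2+2+1, 2+1+2, ...).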
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689
| 1
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
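# Illustrative sketch (added; not part of the test suite): the denoising loop the
# tests above exercise, written against the public DDPMScheduler API with a
# stand-in "model" that simply predicts zeros.
if __name__ == "__main__":
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # a real model would predict noise here
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])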
| 342
|
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: returns all primes up to `n`."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
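# Quick sanity check (added for illustration):
# sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]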
| 342
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of `num` using Pollard's rho, or None on failure."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674
| 0
|
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number (28)."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
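# Worked example (added for illustration): for "AB" the loop visits "B" then "A",
# giving answer = 2 * 26**0 + 1 * 26**1 = 28. Similarly, "ZZ" -> 702.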
| 589
|
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
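# Example invocation (added; script name and paths are illustrative assumptions):
# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bigbird/model.ckpt \
#     --big_bird_config_file ./bigbird/config.json \
#     --pytorch_dump_path ./bigbird-pytorch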
| 589
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, rembert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 419
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]
        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pix2pix_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 419
| 1
|
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
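# Worked example (added for illustration): with pad_token_id=0 and
# decoder_start_token_id=0, [[5, 6, 7]] becomes [[0, 5, 6]] -- each token moves
# one slot to the right and the decoder start token fills position 0.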
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 397
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
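
# Illustrative invocation of this exporter (the script filename and paths below are
# hypothetical; see `parse_args` above for the authoritative option list):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 64 \
#       --output_file_path bart_beam_search.onnx \
#       --device cpu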
| 397
| 1
|
""" Longformer configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
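
# Minimal usage sketch (illustrative, not part of the original module): build a small
# config and inspect the input axes the ONNX export will declare.
if __name__ == "__main__":
    cfg = LongformerConfig(attention_window=128, num_hidden_layers=2)
    onnx_cfg = LongformerOnnxConfig(cfg)
    print(list(onnx_cfg.inputs))  # ['input_ids', 'attention_mask', 'global_attention_mask']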
| 707
|
"""OpenAI GPT model fine-tuning script.

Fine-tunes `OpenAIGPTDoubleHeadsModel` on the RocStories dataset.
"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
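
# Shape sketch for `pre_process_datasets` (toy ids, illustrative only): a single
# example with story [10, 11] and continuations [12] / [13] yields two stacked rows,
# one per alternative, each laid out as [start, story..., delimiter, cont..., clf]:
#   encoded = [[([10, 11], [12], [13], 0)]]
#   ids, mc_tok, lm_lab, mc_lab = pre_process_datasets(
#       encoded, input_len=8, cap_length=4, start_token=1, delimiter_token=2, clf_token=3
#   )[0]
#   # ids.shape == (1, 2, 8); ids[0, 0, :6] == [1, 10, 11, 2, 12, 3]; mc_tok[0, 0] == 5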
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
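
# Illustrative invocation (hypothetical local dataset paths; the RocStories CSVs must
# be obtained separately):
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset ./cloze_test_val.csv \
#       --eval_dataset ./cloze_test_test.csv \
#       --output_dir ./rocstories_finetuned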
| 471
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
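
# Worked example of the renaming above (made-up checkpoint key): with offset 1,
# block index 2 becomes 1 and "mlp.fc1" is mapped to "output.conv1".
#   replace_key_with_offset("network.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   # -> "network.block.1.3.output.conv1.weight"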
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
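
# Illustrative invocation (the script filename and checkpoint path are hypothetical):
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf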
| 336
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
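
# Illustrative standalone sketch (not part of the original tests) of the `update`
# round-trip exercised above: unknown kwargs are returned rather than set.
if __name__ == "__main__":
    demo_config = GenerationConfig()
    leftover = demo_config.update(max_new_tokens=256, not_a_real_field="ignored")
    print(demo_config.max_new_tokens)  # 256
    print(leftover)  # {'not_a_real_field': 'ignored'}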
| 313
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
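
# Illustrative CLI usage (assuming this module backs the `accelerate config` command,
# as the imported subparsers suggest):
#   accelerate config            # interactive configuration questionnaire
#   accelerate config default    # write a default config file
#   accelerate config update     # update an existing config file in place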
| 711
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
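
# Illustrative everyday usage outside the test harness (the checkpoint name is shown
# as an example; substitute the BARTpho checkpoint you actually use):
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   tokenizer.tokenize("This is a test")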
| 277
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
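
# Illustrative everyday usage outside the tests (checkpoint name shown as an
# example): the processor tokenizes text and preprocesses images in one call.
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")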
| 159
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
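
# Minimal usage sketch (illustrative, not part of the original module): configuring
# linear RoPE scaling; an invalid dict (e.g. factor <= 1.0) raises the ValueError
# from `_rope_scaling_validation`.
if __name__ == "__main__":
    cfg = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(cfg.rope_scaling)  # {'type': 'linear', 'factor': 2.0}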
| 159
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year = {2021}
}
"""

_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).

This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
    >>> print(out.mauve) # doctest: +SKIP
    1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 698
|
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
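
# Usage sketch (illustrative): 1 kilowatthour expressed in megajoules.
#   energy_conversion("kilowatthour", "megajoule", 1)  # -> 3.6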
| 698
| 1
|
"""Convert Wav2Vec2 checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
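# `recursively_load_weights` walks the whole fairseq state dict: conv weights go
# through `load_conv_layer`, everything else through `load_wavaveca_layer`.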
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
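# In fairseq conv names of the form "<layer_id>.<type_id>....", type_id 0 is the
# convolution itself and type_id 2 is its (group or layer) normalization.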
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
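# Entry point: build the HF config and the matching head (sequence classification,
# CTC, or pretraining), load the fairseq checkpoint, copy the weights over, and
# save the converted model.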
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 460
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
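# The tester below builds a small random config and inputs so the shared
# ModelTesterMixin checks can run quickly against a tiny ESM model.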
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Position ids should skip the embedding's padding index, so the first
        non-padding position is padding_idx + 1 and pad tokens keep padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """When only inputs_embeds are given, position ids are generated
        sequentially starting from padding_idx + 1."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 460
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
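# This init file follows the transformers lazy-import pattern: `_import_structure`
# lists the public symbols, and at runtime the module is swapped for a
# `_LazyModule` that only imports them on first attribute access.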
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 283
|
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """For each index i > 0, z_result[i] is the length of the longest substring
    starting at i that matches a prefix of input_str. Runs in O(n)."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match starting at index ``i`` can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
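# Example (illustrative): find_pattern("abr", "abracadabra") returns 2, since
# "abr" starts at indices 0 and 7 of "abracadabra".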
if __name__ == "__main__":
import doctest
doctest.testmod()
| 283
| 1
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
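# The callback below re-runs evaluation on the *training* split whenever an
# evaluation is scheduled, so train and validation metrics can be compared
# side by side in the logs.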
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 10
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
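# Benchmark options consumed by the (deprecated) Hugging Face benchmarking
# utilities. `list_field` above is needed because mutable defaults are not
# allowed directly in dataclass fields.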
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 10
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config file that works on CPU, GPU, and multi-GPU.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 435
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if ``number`` is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq
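# `add_three` below adds three fractions given as numerator/denominator pairs
# and reduces the result to lowest terms; keeping the inner loops on plain ints
# avoids Fraction overhead, and Fractions are only built once at the end.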
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Collect all distinct reduced fraction sums produced by the four solution
    families below and return numerator + denominator of their total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 435
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 106
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f'''{file}_{class_name}_{test_name}'''
    done_test[_id] += 1
    with open(file, 'r') as f:
        lines = f.readlines()

    class_regex = f'''class {class_name}('''
    func_regex = f'''{4 * " "}def {test_name}('''
    line_begin_regex = f'''{8 * " "}{correct_line.split()[0]}'''
    another_line_begin_regex = f'''{16 * " "}{correct_line.split()[0]}'''
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f'''{spaces * " "}{correct_line}''')
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, 'w') as f:
        for line in new_lines:
            f.write(line)
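# Driver: each line of the "correct" file is expected to look like
# "file;class_name;test_name;correct_line", optionally filtered to failing tests.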
def main(correct, fail=None):
    if fail is not None:
        with open(fail, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, 'r') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 106
| 1
|
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1'''))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
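# Example (illustrative): binary_and(25, 32) == "0b000000",
# since bin(25) = 0b11001 and bin(32) = 0b100000 share no set bits.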
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
from math import factorial
def solution(n: int = 20) -> int:
    """Count lattice paths through an n x n grid via the central binomial coefficient."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))
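# Sanity check (illustrative): solution(20) == C(40, 20) == 137846528820,
# the number of lattice paths through a 20x20 grid.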
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 597
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
| 611
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a ( __UpperCAmelCase ):
lowercase_ : Any = 'wavlm'
def __init__( self : List[Any] , snake_case__ : int=32 , snake_case__ : Optional[int]=768 , snake_case__ : int=12 , snake_case__ : Optional[Any]=12 , snake_case__ : Union[str, Any]=3_072 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : Dict=0.0 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Any=0.1 , snake_case__ : str=0.0_2 , snake_case__ : Dict=1E-5 , snake_case__ : Union[str, Any]="group" , snake_case__ : List[Any]="gelu" , snake_case__ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , snake_case__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , snake_case__ : str=(10, 3, 3, 3, 3, 2, 2) , snake_case__ : str=False , snake_case__ : Dict=128 , snake_case__ : List[str]=16 , snake_case__ : Union[str, Any]=320 , snake_case__ : int=800 , snake_case__ : Optional[int]=False , snake_case__ : int=True , snake_case__ : Tuple=0.0_5 , snake_case__ : Any=10 , snake_case__ : Union[str, Any]=2 , snake_case__ : List[Any]=0.0 , snake_case__ : Dict=10 , snake_case__ : Any=320 , snake_case__ : str=2 , snake_case__ : Any=0.1 , snake_case__ : int=100 , snake_case__ : str=256 , snake_case__ : Dict=256 , snake_case__ : List[Any]=0.1 , snake_case__ : Optional[Any]="mean" , snake_case__ : Tuple=False , snake_case__ : Dict=False , snake_case__ : Dict=256 , snake_case__ : Tuple=(512, 512, 512, 512, 1_500) , snake_case__ : Tuple=(5, 3, 3, 1, 1) , snake_case__ : str=(1, 2, 3, 1, 1) , snake_case__ : Any=512 , snake_case__ : List[Any]=80 , snake_case__ : Any=0 , snake_case__ : Tuple=1 , snake_case__ : List[str]=2 , snake_case__ : int=False , snake_case__ : List[str]=3 , snake_case__ : List[str]=2 , snake_case__ : Dict=3 , snake_case__ : Tuple=None , **snake_case__ : Tuple , ):
"""simple docstring"""
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = feat_extract_norm
__lowerCAmelCase = feat_extract_activation
__lowerCAmelCase = list(snake_case__ )
__lowerCAmelCase = list(snake_case__ )
__lowerCAmelCase = list(snake_case__ )
__lowerCAmelCase = conv_bias
__lowerCAmelCase = num_buckets
__lowerCAmelCase = max_bucket_distance
__lowerCAmelCase = num_conv_pos_embeddings
__lowerCAmelCase = num_conv_pos_embedding_groups
__lowerCAmelCase = len(self.conv_dim )
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = feat_proj_dropout
__lowerCAmelCase = final_dropout
__lowerCAmelCase = layerdrop
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_ctc_classes
__lowerCAmelCase = vocab_size
__lowerCAmelCase = do_stable_layer_norm
__lowerCAmelCase = use_weighted_layer_sum
__lowerCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = apply_spec_augment
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowerCAmelCase = num_codevectors_per_group
__lowerCAmelCase = num_codevector_groups
__lowerCAmelCase = contrastive_logits_temperature
__lowerCAmelCase = num_negatives
__lowerCAmelCase = codevector_dim
__lowerCAmelCase = proj_codevector_dim
__lowerCAmelCase = diversity_loss_weight
# ctc loss
__lowerCAmelCase = ctc_loss_reduction
__lowerCAmelCase = ctc_zero_infinity
# adapter
__lowerCAmelCase = add_adapter
__lowerCAmelCase = adapter_kernel_size
__lowerCAmelCase = adapter_stride
__lowerCAmelCase = num_adapter_layers
__lowerCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = list(snake_case__ )
__lowerCAmelCase = list(snake_case__ )
__lowerCAmelCase = list(snake_case__ )
__lowerCAmelCase = xvector_output_dim
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
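# A minimal sketch of what the stride-product property above computes, using
# the default conv_stride from the signature. The upstream equivalent of this
# anonymized class is `transformers.WavLMConfig`, whose matching property is
# `inputs_to_logits_ratio` (an assumption about the upstream name, not taken
# from this file).
import functools
import operator

_default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
# one encoder frame is produced per 5 * 2**6 == 320 raw audio samples
assert functools.reduce(operator.mul, _default_conv_stride, 1) == 320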
| 611
| 1
|
"""simple docstring"""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls drawn from
    an urn of 70 balls (10 of each of 7 colours). By linearity of expectation,
    E[#colours] = NUM_COLOURS * P(a given colour appears at least once)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
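# A quick Monte Carlo cross-check of the closed form above (illustrative only;
# the 70-ball urn layout and `random.sample` are the only assumptions). The
# estimate should approach the value printed by solution(20) as `trials` grows.
import random


def monte_carlo_estimate(trials: int = 100_000) -> float:
    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(urn, 20)))
    return seen / trials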
| 721
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Also evaluates on the training set whenever an evaluation is due, so
    train and validation accuracy can be compared epoch by epoch."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
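# Typical invocation, assuming this file is saved as
# train_complexity_predictor.py (file name and flag values are illustrative;
# the flags themselves are defined in get_args above):
#
#   python train_complexity_predictor.py \
#       --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --output_dir ./results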
| 477
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
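# A minimal round-trip sketch for the model above. With the default
# single-block config the spatial size is preserved, so a 1x3x32x32 input is
# expected to decode back to the same shape; shapes are assumptions based on
# the default arguments, not guarantees for other configs.
if __name__ == "__main__":
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    reconstruction = model(sample).sample
    print(reconstruction.shape)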
| 28
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Union[str, Any] = '''sew-d'''
def __init__(self : Tuple , A__ : str=3_2 , A__ : str=7_6_8 , A__ : Union[str, Any]=1_2 , A__ : List[str]=1_2 , A__ : List[Any]=3_0_7_2 , A__ : List[Any]=2 , A__ : Dict=5_1_2 , A__ : Any=2_5_6 , A__ : str=True , A__ : Dict=True , A__ : str=("p2c", "c2p") , A__ : Optional[int]="layer_norm" , A__ : Optional[int]="gelu_python" , A__ : List[Any]=0.1 , A__ : List[str]=0.1 , A__ : Optional[Any]=0.1 , A__ : Optional[Any]=0.0 , A__ : str=0.1 , A__ : Optional[int]=0.0_2 , A__ : Union[str, Any]=1e-7 , A__ : List[str]=1e-5 , A__ : Union[str, Any]="group" , A__ : List[Any]="gelu" , A__ : Dict=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , A__ : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A__ : Optional[Any]=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A__ : Optional[Any]=False , A__ : List[str]=1_2_8 , A__ : int=1_6 , A__ : List[str]=True , A__ : Dict=0.0_5 , A__ : Any=1_0 , A__ : str=2 , A__ : Optional[int]=0.0 , A__ : str=1_0 , A__ : List[Any]=0 , A__ : Tuple="mean" , A__ : Union[str, Any]=False , A__ : Optional[Any]=False , A__ : Optional[int]=2_5_6 , A__ : Dict=0 , A__ : List[str]=1 , A__ : str=2 , **A__ : Tuple , ) -> List[Any]:
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
lowercase = hidden_size
lowercase = feat_extract_norm
lowercase = feat_extract_activation
lowercase = list(A__ )
lowercase = list(A__ )
lowercase = list(A__ )
lowercase = conv_bias
lowercase = num_conv_pos_embeddings
lowercase = num_conv_pos_embedding_groups
lowercase = len(self.conv_dim )
lowercase = num_hidden_layers
lowercase = intermediate_size
lowercase = squeeze_factor
lowercase = max_position_embeddings
lowercase = position_buckets
lowercase = share_att_key
lowercase = relative_attention
lowercase = norm_rel_ebd
lowercase = list(A__ )
lowercase = hidden_act
lowercase = num_attention_heads
lowercase = hidden_dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = feat_proj_dropout
lowercase = final_dropout
lowercase = layer_norm_eps
lowercase = feature_layer_norm_eps
lowercase = initializer_range
lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase = apply_spec_augment
lowercase = mask_time_prob
lowercase = mask_time_length
lowercase = mask_time_min_masks
lowercase = mask_feature_prob
lowercase = mask_feature_length
lowercase = mask_feature_min_masks
# ctc loss
lowercase = ctc_loss_reduction
lowercase = ctc_zero_infinity
# sequence classification
lowercase = use_weighted_layer_sum
lowercase = classifier_proj_size
@property
def UpperCAmelCase__ (self : str ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
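# Same stride-product idea for the SEW-D defaults above: the 13 conv strides
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiply to 5 * 2**6 == 320 samples
# per frame, and the encoder then shortens the sequence further by
# `squeeze_factor` (default 2). Purely illustrative arithmetic:
#
#   import functools, operator
#   functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)  # 320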
| 310
| 0
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _lowerCamelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="None" , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ) -> List[str]:
SCREAMING_SNAKE_CASE__: Tuple= parent
SCREAMING_SNAKE_CASE__: List[Any]= batch_size
SCREAMING_SNAKE_CASE__: Union[str, Any]= seq_length
SCREAMING_SNAKE_CASE__: Dict= is_training
SCREAMING_SNAKE_CASE__: Any= use_input_mask
SCREAMING_SNAKE_CASE__: int= use_token_type_ids
SCREAMING_SNAKE_CASE__: Tuple= use_labels
SCREAMING_SNAKE_CASE__: str= vocab_size
SCREAMING_SNAKE_CASE__: Optional[Any]= hidden_size
SCREAMING_SNAKE_CASE__: Any= num_hidden_layers
SCREAMING_SNAKE_CASE__: Tuple= num_attention_heads
SCREAMING_SNAKE_CASE__: List[str]= intermediate_size
SCREAMING_SNAKE_CASE__: Optional[int]= hidden_act
SCREAMING_SNAKE_CASE__: Tuple= hidden_dropout_prob
SCREAMING_SNAKE_CASE__: int= attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__: Union[str, Any]= max_position_embeddings
SCREAMING_SNAKE_CASE__: str= type_vocab_size
SCREAMING_SNAKE_CASE__: Union[str, Any]= type_sequence_label_size
SCREAMING_SNAKE_CASE__: List[Any]= initializer_range
SCREAMING_SNAKE_CASE__: List[str]= num_labels
SCREAMING_SNAKE_CASE__: List[Any]= num_choices
SCREAMING_SNAKE_CASE__: Any= relative_attention
SCREAMING_SNAKE_CASE__: Tuple= position_biased_input
SCREAMING_SNAKE_CASE__: Dict= pos_att_type
SCREAMING_SNAKE_CASE__: str= scope
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__: List[Any]= None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__: Optional[int]= random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__: Union[str, Any]= None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__: str= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__: List[str]= None
SCREAMING_SNAKE_CASE__: List[str]= None
SCREAMING_SNAKE_CASE__: Any= None
if self.use_labels:
SCREAMING_SNAKE_CASE__: int= ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__: str= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__: str= DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=lowerCAmelCase,
        )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__: Dict= TFDebertaVaModel(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE__: Dict= [input_ids, input_mask]
SCREAMING_SNAKE_CASE__: str= model(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= TFDebertaVaForMaskedLM(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE__: Optional[Any]= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: str= self.num_labels
SCREAMING_SNAKE_CASE__: Tuple= TFDebertaVaForSequenceClassification(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__: Any= self.num_labels
SCREAMING_SNAKE_CASE__: Any= TFDebertaVaForTokenClassification(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE__: Dict= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Any= TFDebertaVaForQuestionAnswering(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE__: Optional[int]= model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__a = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__a = False
__a = False
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: List[str]= TFDebertaVaModelTester(self )
SCREAMING_SNAKE_CASE__: str= ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: str= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Optional[Any]= TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(lowerCAmelCase )
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Any= TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
SCREAMING_SNAKE_CASE__: Optional[int]= tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
SCREAMING_SNAKE_CASE__: List[Any]= tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE__: Dict= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE__: Any= tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1e-4 )
| 107
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
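# Illustrative effect of the lazy module above: nothing under the maskformer
# package is actually imported until first attribute access, e.g.
#
#   from transformers import MaskFormerConfig  # resolves configuration_maskformer lazily
#
# (import path assumed from the standard transformers layout).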
| 107
| 1
|
def different_signs(num_a: int, num_b: int) -> bool:
    """
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num_a ^ num_b < 0
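# Why the XOR trick works: two's-complement integers keep their sign in the
# most significant bit, so `num_a ^ num_b` has a set sign bit (i.e. is
# negative) exactly when the operands' signs differ.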
if __name__ == "__main__":
import doctest
doctest.testmod()
| 491
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BPE tokenizer for HerBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
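# Hedged usage sketch (assumes network access to the `allegro/herbert-base-cased`
# checkpoint referenced in the URL map above):
#
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   ids = tokenizer("Kod jest poezja.")["input_ids"]
#   # ids begins with cls_token_id and ends with sep_token_id, as produced by
#   # build_inputs_with_special_tokens above.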
| 221
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
snake_case = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = "git_vision_model"
def __init__( self : int , UpperCAmelCase_ : Optional[Any]=768 , UpperCAmelCase_ : Any=3072 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : List[Any]=224 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : str="quick_gelu" , UpperCAmelCase_ : List[Any]=1E-5 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , **UpperCAmelCase_ : str , ):
super().__init__(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : Dict = image_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCamelCase_ : Dict = "git"
def __init__( self : List[Any] , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int=3_0522 , UpperCAmelCase_ : Tuple=768 , UpperCAmelCase_ : Dict=6 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : int=3072 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[str]=1024 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : List[Any]="absolute" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=101 , UpperCAmelCase_ : Optional[Any]=102 , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple , ):
super().__init__(bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , pad_token_id=_lowerCamelCase , **_lowerCamelCase )
if vision_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
SCREAMING_SNAKE_CASE : Any = GitVisionConfig(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : int = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = tie_word_embeddings
SCREAMING_SNAKE_CASE : int = num_image_with_embedding
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : List[Any] = eos_token_id
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type
return output
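# Minimal sketch of the nested-config pattern above (names follow the upstream
# transformers GitConfig/GitVisionConfig API; treat them as assumptions):
#
#   config = GitConfig()                # builds a default GitVisionConfig internally
#   d = config.to_dict()                # the to_dict override inlines the vision config
#   d["vision_config"]["model_type"]    # "git_vision_model"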
| 712
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
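# Hedged call sketch: `clip_input` is the CLIP-preprocessed pixel batch and
# `images` the decoded frames to be blanked in place (both names assumed):
#
#   checker = IFSafetyChecker(CLIPConfig())
#   images, nsfw, watermark = checker(clip_input, images, p_threshold=0.5, w_threshold=0.5)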
| 488
| 0
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = '''pt'''
elif is_tf_available():
__UpperCAmelCase = '''tf'''
else:
__UpperCAmelCase = '''jax'''
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[Any] = ByTaTokenizer
lowercase__ : Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
super().setUp()
lowerCAmelCase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=20 , lowerCamelCase_=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase__ = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCAmelCase__ = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase__ = list(filter(lambda lowerCamelCase_ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , lowerCamelCase_ ) )
lowerCAmelCase__ = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase_ ) , lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCAmelCase__ = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCAmelCase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCAmelCase__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCAmelCase__ = ''' ''' + output_txt
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
lowerCAmelCase__ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = '''Unicode €.'''
lowerCAmelCase__ = tokenizer(lowerCamelCase_ )
lowerCAmelCase__ = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_ )
# decoding
lowerCAmelCase__ = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , '''Unicode €.</s>''' )
lowerCAmelCase__ = tokenizer('''e è é ê ë''' )
lowerCAmelCase__ = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_ )
# decoding
lowerCAmelCase__ = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase__ = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCAmelCase__ = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCamelCase_ )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertNotIn('''decoder_input_ids''' , lowerCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase__ = tokenizer(
text_target=lowerCamelCase_ , max_length=32 , padding='''max_length''' , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ['''A long paragraph for summarization. </s>''']
lowerCAmelCase__ = ['''Summary of the text. </s>''']
# fmt: off
lowerCAmelCase__ = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
lowerCAmelCase__ = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , text_target=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch['''input_ids'''][0] )
self.assertEqual(lowerCamelCase_ , batch['''labels'''][0] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCAmelCase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase__ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
lowerCAmelCase__ = [F"""<extra_id_{i}>""" for i in range(1_25 )]
lowerCAmelCase__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ = tokenizer_class.from_pretrained(
lowerCamelCase_ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCamelCase_ )]
lowerCAmelCase__ = tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_class.from_pretrained(lowerCamelCase_ )
self.assertTrue(tokenizer.decode([2_55] ) == '''''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
lowerCAmelCase__ = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowerCAmelCase__ = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCAmelCase__ = 0
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
for attr in attributes_list:
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [] )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 90
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 647
| 0
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
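# Example invocation (all paths are placeholders; assumes this file is saved
# as convert_bert_original_tf_checkpoint_to_pytorch.py):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin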
| 713
|
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether `matrix` equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Rayleigh quotient of Hermitian matrix `a` and non-zero vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
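# For Hermitian M the quantity above, R(M, v) = (v* M v) / (v* v), is always
# real and lies between the smallest and largest eigenvalues of M, which is
# why the second test can assert an exact real value.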
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 635
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
for attribute in key.split("."):
SCREAMING_SNAKE_CASE : List[Any] = getattr(_a , _a)
if weight_type is not None:
SCREAMING_SNAKE_CASE : str = getattr(_a , _a).shape
else:
SCREAMING_SNAKE_CASE : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : Any = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
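# Example invocation (paths are hypothetical):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt --pytorch_dump_folder_path ./unispeech-sat-hf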
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25
|
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """
    Apply the ReLU activation element-wise: max(0, x).

    >>> relu([-1, 0, 5])
    array([0, 0, 5])
    """
    return np.maximum(0, vector)
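# ReLU is applied element-wise, so any array shape works, e.g.:
# relu(np.array([[-2.0, 3.0], [0.5, -0.1]])) -> [[0.  3. ], [0.5 0. ]]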
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 458
| 0
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 172
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
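# The zero-initialized 1x1 convolutions created below implement ControlNet's
# "zero convolution" trick: at the start of training the ControlNet branch adds
# nothing to the residuals, so the behaviour of the frozen base UNet is preserved.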
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 172
| 1
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 354
|
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch in the window at `current_pos`, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
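# Demo: find all occurrences of `pattern` in `text` using only the bad-character
# heuristic (full Boyer-Moore also uses the good-suffix rule, omitted here).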
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 354
| 1
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
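# Casimir force between two parallel plates: F = (ℏ * c * pi^2 * A) / (240 * d^4).
# Given any two of force, area and distance, the function below solves for the third.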
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test OpenAIGPTTokenizer with Spacy/ftfy."""

    pass
| 59
| 0
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
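# For example, `_re_identifier` pulls "albert" out of a mapping entry such as:
#     ("albert", "AlbertConfig"),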
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 18
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if `num1` and `num2` have opposite signs."""
    return num1 ^ num2 < 0
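# Works via two's complement: the XOR of two ints is negative exactly when their
# sign bits differ, e.g. different_signs(1, -1) -> True, different_signs(1, 1) -> False.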
if __name__ == "__main__":
import doctest
doctest.testmod()
| 507
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
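# Layer counts and hidden sizes of the published RWKV-4 checkpoints, used to build
# the `RwkvConfig` once the size is given via `--size` or inferred from the filename.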
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 522
|
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
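# The helpers above make up one Feistel round of Simplified DES (S-DES): expand and
# permute the right half, XOR with the round key, substitute through S-boxes s0/s1,
# permute with P4, then XOR the result into the left half.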
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 522
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
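# Only the checkpoints listed above are supported; the filename decides both the head
# (pretraining / vqa / nlvr / multichoice) and the visual embedding dimension below.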
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 382
|
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
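# Example: fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz ".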
if __name__ == "__main__":
import doctest
doctest.testmod()
| 382
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
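# The defaults below reproduce the architecture of `facebook/data2vec-text-base`
# (a RoBERTa-style text encoder).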
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 705
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase (self ) -> List[Any]:
lowercase_ : Any = tempfile.mkdtemp()
lowercase_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowercase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowercase_ : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_a , _a )
def _lowerCamelCase (self , **_a ) -> Tuple:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def _lowerCamelCase (self , **_a ) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def _lowerCamelCase (self , **_a ) -> int:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_a )
def _lowerCamelCase (self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase (self ) -> str:
lowercase_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase_ : Optional[Any] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase (self ) -> Optional[Any]:
lowercase_ : str = self.get_tokenizer()
lowercase_ : Tuple = self.get_rust_tokenizer()
lowercase_ : int = self.get_image_processor()
lowercase_ : int = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
lowercase_ : Optional[Any] = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : Any = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def _lowerCamelCase (self ) -> Tuple:
lowercase_ : str = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowercase_ : str = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
lowercase_ : Optional[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _lowerCamelCase (self ) -> List[str]:
lowercase_ : Union[str, Any] = self.get_image_processor()
lowercase_ : Union[str, Any] = self.get_tokenizer()
lowercase_ : Optional[int] = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase_ : Union[str, Any] = self.prepare_image_inputs()
lowercase_ : Union[str, Any] = image_processor(_a , return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=_a , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase (self ) -> Dict:
lowercase_ : List[Any] = self.get_image_processor()
lowercase_ : int = self.get_tokenizer()
lowercase_ : Any = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase_ : Tuple = 'lower newer'
lowercase_ : Optional[Any] = processor(text=_a )
lowercase_ : List[Any] = tokenizer(_a , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase (self ) -> List[Any]:
lowercase_ : Any = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[str] = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase_ : Optional[Any] = 'lower newer'
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : List[str] = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def _lowerCamelCase (self ) -> Tuple:
lowercase_ : Dict = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : Optional[Any] = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : str = processor.batch_decode(_a )
lowercase_ : Dict = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def _lowerCamelCase (self ) -> List[str]:
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : str = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase_ : Optional[int] = 'lower newer'
lowercase_ : Union[str, Any] = self.prepare_image_inputs()
lowercase_ : Union[str, Any] = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 438
| 0
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env) -> None:
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        # map raw dataset values to zero mean / unit std using the stats gathered above
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        # inverse of normalize
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_xa(self, x_in, cond, act_dim):
        # overwrite the state portion of the conditioned timesteps with the fixed values
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
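# Hypothetical driver for the pipeline above (env, nets and scheduler are assumed
# to come from a D4RL-style setup; names and the reset() return shape are illustrative):
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)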
| 96
|
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 182
| 0
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
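# e.g. get_new_path(".wav") -> "/tmp/tmpXXXXXXXX/<uuid4>.wav"; a fresh temp
# directory is created on every call, so generated names never collide.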
@require_soundfile
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = torch.rand(12 , dtype=torch.floataa ) - 0.5
__lowercase = AgentAudio(lowercase )
__lowercase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowercase ) )
# Ensure that the file contains the same value as the original tensor
__lowercase = sf.read(lowercase )
self.assertTrue(torch.allclose(lowercase , torch.tensor(lowercase ) , atol=1E-4 ) )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = torch.rand(12 , dtype=torch.floataa ) - 0.5
__lowercase = get_new_path(suffix=""".wav""" )
sf.write(lowercase , lowercase , 16_000 )
__lowercase = AgentAudio(lowercase )
self.assertTrue(torch.allclose(lowercase , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowercase )
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase = torch.randint(0 , 256 , (64, 64, 3) )
__lowercase = AgentImage(lowercase )
__lowercase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase ) )
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / "000000039769.png"
__lowercase = Image.open(lowercase )
__lowercase = AgentImage(lowercase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase ) )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / "000000039769.png"
__lowercase = Image.open(lowercase )
__lowercase = AgentImage(lowercase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase ) )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = "Hey!"
__lowercase = AgentText(lowercase )
self.assertEqual(lowercase , agent_type.to_string() )
self.assertEqual(lowercase , agent_type.to_raw() )
self.assertEqual(lowercase , lowercase )
| 704
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
"""simple docstring"""
return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
"""simple docstring"""
if str(lowercase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(lowercase )
else:
__lowercase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase ) ).to(lowercase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self : Dict ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self._test_save_load_local()
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 634
| 0
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 79
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
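# The _LazyModule swap above is the standard deferred-import trick: submodules
# listed in _import_structure are only imported when one of their attributes is
# first accessed, while the TYPE_CHECKING branch keeps static analyzers aware
# of the concrete symbols.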
| 629
| 0
|
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Length of the arc subtended by `angle` degrees on a circle of the given `radius`."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
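    # Worked example: a 90 degree arc of a circle with radius 10 has length
    # 2 * pi * 10 * (90 / 360) = 5 * pi, i.e. roughly 15.708.
    print(arc_length(120, 15))  # 2 * pi * 15 / 3 ~= 31.416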
| 715
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True) -> None:
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a , '''do_resize'''))
self.assertTrue(hasattr(__a , '''size'''))
self.assertTrue(hasattr(__a , '''do_center_crop'''))
self.assertTrue(hasattr(__a , '''center_crop'''))
self.assertTrue(hasattr(__a , '''do_flip_channel_order'''))
    def test_image_processor_from_dict_with_kwargs(self):
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
    def test_batch_feature(self):
'''simple docstring'''
pass
    def test_call_pil(self):
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a , Image.Image)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a)
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a)
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 78
| 0
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
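# Note on the pagination loop above: user_timeline caps out at 200 tweets per
# request, so each follow-up call passes max_id = (oldest id - 1); that walks
# backwards through the timeline without duplicates until an empty page signals the end.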
| 323
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( A__, unittest.TestCase ):
lowercase : Optional[Any] =RobertaTokenizer
lowercase : Dict =RobertaTokenizerFast
lowercase : Union[str, Any] =True
lowercase : Dict ={'''cls_token''': '''<s>'''}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase__ ) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , **UpperCamelCase__ : Dict ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **UpperCamelCase__ : Dict ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = "lower newer"
UpperCAmelCase = "lower newer"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase = "lower newer"
UpperCAmelCase = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokens + [tokenizer.unk_token]
UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=UpperCamelCase__ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=UpperCamelCase__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = self.tokenizer_class.from_pretrained("roberta-base" )
UpperCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode(
"sequence builders" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = "Encode this sequence."
UpperCAmelCase = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing spaces after special tokens
UpperCAmelCase = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )} ) # mask token has a left space
UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
UpperCAmelCase = "Encode <mask> sequence"
UpperCAmelCase = "Encode <mask>sequence"
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ )
UpperCAmelCase = encoded.index(UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ )
UpperCAmelCase = encoded.index(UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
UpperCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , UpperCamelCase__ )
self.assertEqual(post_processor_state["add_prefix_space"] , UpperCamelCase__ )
self.assertEqual(post_processor_state["trim_offsets"] , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCAmelCase = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase = F'{text_of_1_token} {text_of_1_token}'
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
UpperCAmelCase = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
| 323
| 1
|
def interpolation_search(sorted_collection: list, item: int):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
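    # Hand-checked probes for the two search functions above:
    #   interpolation_search([1, 3, 5, 7, 9], 7)                          -> 3
    #   interpolation_search_by_recursion([5, 10, 12, 14, 17], 12, 0, 4)  -> 2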
| 661
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""
    def __init__(self, *, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None):  # begin keyword-only arguments
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other) -> bool:
return self.indices == other.indices
    def __getitem__(self, idx):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
    def __contains__(self, sym) -> bool:
return sym in self.indices
@classmethod
    def load(cls, f):
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
    def add_symbol(self, word, n=1, overwrite=False):
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
    def _load_meta(self, lines):
return 0
    def add_from_file(self, f):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys(d: dict) -> dict:
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
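# Example invocation (paths are illustrative; the dump dir must contain
# checkpoint.pt, dict.txt and bpecodes, as validated above):
#   python convert_biogpt_checkpoint.py --biogpt_checkpoint_path ./biogpt_raw \
#       --pytorch_dump_folder_path ./biogpt-hf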
| 661
| 1
|
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
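    # e.g. entering "5, 9, 8, 7, 1, 2, 7" prints [1, 2, 5, 7, 7, 8, 9]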
| 369
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A weighted, undirected graph used for building a minimum spanning tree."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # grow the tree one cheapest crossing edge at a time: an edge qualifies
        # when exactly one endpoint is already in the subgraph (the XOR test)
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_directory: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}
    with open(filepath) as f:
        data = f.read().strip().split("\n")
    adjaceny_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
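# Hand-checkable sketch for the Graph class above: the triangle
# {(1, 2): 1, (2, 3): 2, (1, 3): 3} has total weight 6 and MST weight 1 + 2 = 3,
# so the saving computed the same way as in solution() would be 6 - 3 = 3.
#   g = Graph({1, 2, 3}, {(1, 2): 1, (2, 3): 2, (1, 3): 3})
#   assert sum(g.prims_algorithm().edges.values()) == 3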
| 369
| 1
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Find the numerator of the fraction immediately to the left of numerator/denominator."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
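# Hand-checkable case: among fractions with denominator <= 8, the largest one
# below 3/7 is 2/5, so the returned numerator is 2:
#
#     >>> solution(numerator=3, denominator=7, limit=8)
#     2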
| 419
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
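# Example invocation (a sketch; it needs network access for torch.hub and the
# Hugging Face Hub, and the script filename is assumed):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50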
| 419
| 1
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
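# Illustrative use of the helper under test (a sketch; `calculate_rouge` comes
# from the sibling utils module and, when aggregating, returns a mapping of
# rouge key -> F-measure * 100):
#
#     >>> calculate_rouge(["the cat sat"], ["the cat sat"], rouge_keys=["rouge1"])
#     {'rouge1': 100.0}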
| 681
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
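# Minimal usage sketch: instantiate with defaults and override a field.
#
#     >>> config = DecisionTransformerConfig(state_dim=11, act_dim=3)
#     >>> config.n_layer
#     3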
| 681
| 1
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed/compiled wrappers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save data to disk. Use in place of `torch.save()` to be distributed-safe."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (upper-cased) inside the context manager."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Get a human-readable name for an object, class, or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, descending into dict values."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost` (defaults to 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
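# Example of the environment patcher above: keys are upper-cased, set for the
# duration of the block, and removed afterwards (assuming the variable was not
# already set before entering):
#
#     >>> with patch_environment(master_port=29501):
#     ...     os.environ["MASTER_PORT"]
#     '29501'
#     >>> "MASTER_PORT" in os.environ
#     False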
| 700
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input: OCR-produced words/boxes conflict with user-provided ones
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError("You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of an overflow, ensure each `input_ids` sample maps to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
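# Usage sketch (assumes `image` is a PIL.Image of a document; the checkpoint
# name is illustrative):
#
#     >>> from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizerFast
#     >>> processor = LayoutXLMProcessor(
#     ...     image_processor=LayoutLMv2ImageProcessor(),  # applies OCR by default
#     ...     tokenizer=LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base"),
#     ... )
#     >>> encoding = processor(image, return_tensors="pt")
#     >>> sorted(encoding.keys())
#     ['attention_mask', 'bbox', 'image', 'input_ids']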
| 106
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 226
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 259
| 0
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradients for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
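# Usage sketch for the helpers above:
#
#     >>> device = get_device()
#     >>> net = torch.nn.Linear(2, 2).to(device)
#     >>> freeze_module(net)
#     >>> any(p.requires_grad for p in net.parameters())
#     False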
| 113
|
'''simple docstring'''
import os
def _UpperCamelCase ( UpperCamelCase__ = "input.txt" ):
with open(os.path.join(os.path.dirname(UpperCamelCase__ ) , UpperCamelCase__ ) ) as input_file:
UpperCAmelCase__ : Tuple = [
[int(UpperCamelCase__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
UpperCAmelCase__ : Optional[Any] = len(UpperCamelCase__ )
UpperCAmelCase__ : Any = len(matrix[0] )
UpperCAmelCase__ : Optional[int] = [[-1 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
UpperCAmelCase__ : Any = matrix[i][0]
for j in range(1 , UpperCamelCase__ ):
for i in range(UpperCamelCase__ ):
UpperCAmelCase__ : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCAmelCase__ : int = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 113
| 1
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1_000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_problem_types(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
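# To run just this suite (a sketch; the path assumes the usual transformers
# repository layout):
#
#   pytest tests/models/swiftformer/test_modeling_swiftformer.py -q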
| 106
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Heun's (modified Euler) method: a predictor-corrector scheme that averages
    the slopes at the start and at the predicted end of each step."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
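# Worked example (illustrative): integrate y' = y with y(0) = 1 over [0, 1];
# the final value approaches e as the step size shrinks.
if __name__ == "__main__":
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
    print(f"y(1) ~ {approx:.5f} (exact: {np.e:.5f})")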
| 106
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Class names below are reconstructed; they stand in for the flax+transformers
# pipelines this dummy module shadows.
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 718
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}
snake_case_ : Any = """▁"""
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 292
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE__ : int = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def _a ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Optional[int]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE__ : Any = ''
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : int = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
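# query: first hidden_size rows of the fused qkv projection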
SCREAMING_SNAKE_CASE__ : Any = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
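# key: middle hidden_size rows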
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
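# value: last hidden_size rows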
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : int = in_proj_bias[-config.hidden_size :]
def _a ( lowercase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _a ( lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = val
def _a ( ):
'''simple docstring'''
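# a COCO val2017 image commonly used as a quick sanity check in conversion scripts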
SCREAMING_SNAKE_CASE__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def _a ( lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : int=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTHybridConfig(backbone_config=lowercase__ , image_size=3_84 , num_labels=10_00 )
SCREAMING_SNAKE_CASE__ : Any = False
# load original model from timm
SCREAMING_SNAKE_CASE__ : str = timm.create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = create_rename_keys(lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE__ : int = ViTHybridModel(lowercase__ ).eval()
else:
SCREAMING_SNAKE_CASE__ : Tuple = ViTHybridForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# create image processor
SCREAMING_SNAKE_CASE__ : List[Any] = create_transform(**resolve_data_config({} , model=lowercase__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = transform.transforms
SCREAMING_SNAKE_CASE__ : List[Any] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = ViTHybridImageProcessor(
do_resize=lowercase__ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=lowercase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ : Any = transform(lowercase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(lowercase__ , lowercase__ )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
SCREAMING_SNAKE_CASE__ : Optional[int] = timm_model.forward_features(lowercase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase__ , outputs.pooler_output , atol=1E-3 )
else:
SCREAMING_SNAKE_CASE__ : Dict = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 85
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase_ = logging.getLogger(__name__)
def snake_case( __magic_name__=2 , __magic_name__=3 , __magic_name__=16 , __magic_name__ = 10 , __magic_name__ = 2 ) -> List[Any]:
'''simple docstring'''
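# build a synthetic linear-regression dataset: targets follow a * x + b plus gaussian noise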
def get_dataset(__magic_name__ ):
lowercase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__magic_name__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
lowercase : Any = get_dataset(__magic_name__ )
lowercase : List[str] = get_dataset(__magic_name__ )
lowercase : Optional[Any] = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 )
lowercase : Union[str, Any] = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> Optional[int]:
'''simple docstring'''
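# run a short training loop and record random draws so checkpoint determinism can be asserted later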
lowercase : Dict = []
for epoch in range(__magic_name__ ):
# Train quickly
model.train()
for batch in dataloader:
lowercase , lowercase : Union[str, Any] = batch
lowercase : Optional[int] = model(__magic_name__ )
lowercase : str = torch.nn.functional.mse_loss(__magic_name__ , __magic_name__ )
accelerator.backward(__magic_name__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _A ( nn.Module ):
def __init__( self : Dict ) -> Any:
"""simple docstring"""
super().__init__()
lowercase : Optional[int] = nn.Parameter(torch.randn(1 ) )
lowercase : Optional[Any] = nn.Parameter(torch.randn(1 ) )
def __a ( self : Union[str, Any] , _A : Optional[int] ) -> List[Any]:
"""simple docstring"""
return x * self.a + self.b
class _A ( unittest.TestCase ):
def __a ( self : str ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : Tuple = DummyModel()
lowercase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : List[str] = dummy_dataloaders()
lowercase : int = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A )
# Train baseline
lowercase : List[Any] = Accelerator(project_config=_A )
lowercase , lowercase , lowercase , lowercase : Dict = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : str = DummyModel()
lowercase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : Tuple = dummy_dataloaders()
# Train baseline
lowercase : Any = Accelerator()
lowercase , lowercase , lowercase , lowercase : Dict = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
lowercase : int = os.path.join(_A , '''initial''' )
accelerator.save_state(_A )
((lowercase) , (lowercase)) : Optional[int] = model.a.item(), model.b.item()
lowercase : Tuple = optimizer.state_dict()
lowercase : List[Any] = train(3 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : Optional[Any] = model.a.item(), model.b.item()
lowercase : Optional[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase : List[str] = DummyModel()
lowercase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : Any = dummy_dataloaders()
lowercase : Optional[int] = Accelerator()
lowercase , lowercase , lowercase , lowercase : Dict = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(_A )
((lowercase) , (lowercase)) : Any = model.a.item(), model.b.item()
lowercase : Optional[int] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
lowercase : List[Any] = train(2 , _A , _A , _A , _A )
# Save everything
lowercase : Any = os.path.join(_A , '''checkpoint''' )
accelerator.save_state(_A )
# Load everything back in and make sure all states work
accelerator.load_state(_A )
test_rands += train(1 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : int = model.a.item(), model.b.item()
lowercase : Any = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def __a ( self : Any ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : List[Any] = DummyModel()
lowercase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : List[Any] = dummy_dataloaders()
lowercase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
lowercase : List[str] = Accelerator(project_dir=_A , project_config=_A )
lowercase , lowercase , lowercase , lowercase : Optional[int] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
((lowercase) , (lowercase)) : Any = model.a.item(), model.b.item()
lowercase : Dict = optimizer.state_dict()
lowercase : List[Any] = train(3 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : Optional[Any] = model.a.item(), model.b.item()
lowercase : str = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase : Optional[int] = DummyModel()
lowercase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase : Tuple = dummy_dataloaders()
lowercase : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A )
lowercase : Tuple = Accelerator(project_dir=_A , project_config=_A )
lowercase , lowercase , lowercase , lowercase : Tuple = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
((lowercase) , (lowercase)) : Optional[Any] = model.a.item(), model.b.item()
lowercase : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
lowercase : Tuple = train(2 , _A , _A , _A , _A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _A , _A , _A , _A )
((lowercase) , (lowercase)) : str = model.a.item(), model.b.item()
lowercase : Optional[Any] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = torch.tensor([1, 2, 3] )
lowercase : Dict = torch.tensor([2, 3, 4] )
lowercase : Union[str, Any] = DummyModel()
lowercase : int = torch.optim.Adam(net.parameters() )
lowercase : str = Accelerator()
with self.assertRaises(_A ) as ve:
accelerator.register_for_checkpointing(_A , _A , _A , _A )
lowercase : Union[str, Any] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : Optional[int] = DummyModel()
lowercase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase : List[str] = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 )
lowercase , lowercase : int = dummy_dataloaders()
lowercase : Dict = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
lowercase : Any = Accelerator(project_dir=_A , project_config=_A )
lowercase , lowercase , lowercase , lowercase , lowercase : int = accelerator.prepare(
_A , _A , _A , _A , _A )
# Save initial
accelerator.save_state()
lowercase : int = scheduler.state_dict()
train(3 , _A , _A , _A , _A , _A )
self.assertNotEqual(_A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_A , scheduler.state_dict() )
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase : List[Any] = DummyModel()
lowercase : str = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 )
# Train baseline
lowercase : Dict = Accelerator(project_dir=_A , project_config=_A )
lowercase : Optional[int] = accelerator.prepare(_A )
# Save 11 states; with total_limit=2 only the last two checkpoints should survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase : Optional[int] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase_ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase_ = DummyModel()
lowerCAmelCase_ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
lowerCAmelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
lowerCAmelCase_ , lowerCAmelCase_ = dummy_dataloaders()
lowerCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase_ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 217
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A__: Tuple = get_tests_dir('''fixtures''')
A__: Tuple = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
A__: Dict = get_tests_dir('''fixtures/dummy-config.json''')
class _a ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: Dict = 0
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__: Optional[int] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__: List[str] = AutoFeatureExtractor.from_pretrained(__lowerCamelCase ).to_dict()
config_dict.pop("feature_extractor_type" )
UpperCamelCase__: Optional[Any] = WavaVecaFeatureExtractor(**__lowerCamelCase )
# save in new folder
model_config.save_pretrained(__lowerCamelCase )
config.save_pretrained(__lowerCamelCase )
UpperCamelCase__: Union[str, Any] = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
# make sure private variable is not incorrectly saved
UpperCamelCase__: str = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: List[Any] = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCamelCase__: List[str] = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCamelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCamelCase__: Any = AutoFeatureExtractor.from_pretrained(__lowerCamelCase , revision="aaaaaa" )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCamelCase__: Dict = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
with self.assertRaises(__lowerCamelCase ):
UpperCamelCase__: Any = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
UpperCamelCase__: Optional[Any] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
UpperCamelCase__: int = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCamelCase )
UpperCamelCase__: int = AutoFeatureExtractor.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
try:
AutoConfig.register("custom" , __lowerCamelCase )
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__: int = CustomFeatureExtractor.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCamelCase )
UpperCamelCase__: Union[str, Any] = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
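# clean up the auto-class registries so other tests are unaffected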
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = True
try:
AutoConfig.register("custom" , __lowerCamelCase )
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
# If remote code is not set, the default is to use local
UpperCamelCase__: Union[str, Any] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__: Dict = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__: List[Any] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__lowerCamelCase , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 221
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__: Union[str, Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( A_ ,A_):
return (preds == labels).mean()
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
UpperCamelCase__ = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
UpperCamelCase__ = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
UpperCamelCase__ = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys())})
UpperCamelCase__ = field(metadata={"""help""": """Should contain the data files for the task."""})
UpperCamelCase__ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase__ = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def lowerCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__: List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" ,A_)
# Set seed
set_seed(training_args.seed)
try:
UpperCamelCase__: str = processors[data_args.task_name]()
UpperCamelCase__: Tuple = processor.get_labels()
UpperCamelCase__: str = len(A_)
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__: Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=A_ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
UpperCamelCase__: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
UpperCamelCase__: Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path) ,config=A_ ,cache_dir=model_args.cache_dir ,)
# Get datasets
UpperCamelCase__: int = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=A_ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
UpperCamelCase__: Tuple = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=A_ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(A_) -> Dict:
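# pick the highest-scoring choice per example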
UpperCamelCase__: Optional[int] = np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(A_ ,p.label_ids)}
# Data collator
UpperCamelCase__: str = DataCollatorWithPadding(A_ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
UpperCamelCase__: int = Trainer(
model=A_ ,args=A_ ,train_dataset=A_ ,eval_dataset=A_ ,compute_metrics=A_ ,data_collator=A_ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
UpperCamelCase__: Optional[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
UpperCamelCase__: List[Any] = trainer.evaluate()
UpperCamelCase__: Dict = os.path.join(training_args.output_dir ,"eval_results.txt")
if trainer.is_world_master():
with open(A_ ,"w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s" ,A_ ,A_)
writer.write("%s = %s\n" % (key, value))
results.update(A_)
return results
def lowerCAmelCase_ ( A_):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 221
| 1
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCamelCase__ :
__lowerCamelCase = None
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__: Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , A__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__: Any = os.path.join(A__ , """feat_extract.json""" )
feat_extract_first.to_json_file(A__ )
lowerCamelCase__: Union[str, Any] = self.feature_extraction_class.from_json_file(A__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__: int = feat_extract_first.save_pretrained(A__ )[0]
check_json_file_has_correct_format(A__ )
lowerCamelCase__: int = self.feature_extraction_class.from_pretrained(A__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowerCamelCase__: List[str] = self.feature_extraction_class()
self.assertIsNotNone(A__ )
| 306
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCAmelCase__ :List[str] = get_logger()
UpperCAmelCase__ :Optional[dict] = None
class SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : Any , A__ : List[str]=None , A__ : Tuple=None , **A__ : Dict ):
"""simple docstring"""
super().__init__(features=A__ )
import jax
from jaxlib.xla_client import Device
if isinstance(A__ , A__ ):
raise ValueError(
f"Expected {device} to be a `str` not {type(A__ )}, as `jaxlib.xla_extension.Device` "
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
__lowerCamelCase : Optional[int] = device if isinstance(A__ , A__ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__lowerCamelCase : Dict = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"Device with string identifier {self.device} not listed among the available "
f"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
f"device: {str(jax.devices()[0] )}." )
__lowerCamelCase : str = str(jax.devices()[0] )
__lowerCamelCase : Optional[Any] = jnp_array_kwargs
@staticmethod
def a_ ( ):
"""simple docstring"""
import jax
return {str(A__ ): device for device in jax.devices()}
def a_ ( self : Any , A__ : List[str] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
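# stack only when every element is a jax array with matching shape and dtype; otherwise keep the python list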
if isinstance(A__ , A__ ) and column:
if all(
isinstance(A__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(A__ , axis=0 )
return column
def a_ ( self : Tuple , A__ : Optional[int] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(A__ , (str, bytes, type(A__ )) ):
return value
elif isinstance(A__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__lowerCamelCase : Dict = {}
if isinstance(A__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
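# (int64 when x64 mode is enabled, int32 otherwise)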
if jax.config.jax_enable_xaa:
__lowerCamelCase : List[Any] = {"""dtype""": jnp.intaa}
else:
__lowerCamelCase : Any = {"""dtype""": jnp.intaa}
elif isinstance(A__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__lowerCamelCase : int = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A__ , PIL.Image.Image ):
__lowerCamelCase : List[str] = np.asarray(A__ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__lowerCamelCase : Tuple = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(A__ , **{**default_dtype, **self.jnp_array_kwargs} )
def a_ ( self : int , A__ : Any ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(A__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(A__ , """__array__""" ) and not isinstance(A__ , jax.Array ):
__lowerCamelCase : Tuple = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A__ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(A__ ) for substruct in data_struct] )
elif isinstance(A__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A__ ) for substruct in data_struct] )
return self._tensorize(A__ )
def a_ ( self : Tuple , A__ : dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , A__ , map_list=A__ )
def a_ ( self : Any , A__ : pa.Table ):
"""simple docstring"""
__lowerCamelCase : List[str] = self.numpy_arrow_extractor().extract_row(A__ )
__lowerCamelCase : str = self.python_features_decoder.decode_row(A__ )
return self.recursive_tensorize(A__ )
def a_ ( self : Dict , A__ : pa.Table ):
"""simple docstring"""
__lowerCamelCase : List[str] = self.numpy_arrow_extractor().extract_column(A__ )
__lowerCamelCase : int = self.python_features_decoder.decode_column(A__ , pa_table.column_names[0] )
__lowerCamelCase : Optional[int] = self.recursive_tensorize(A__ )
__lowerCamelCase : List[Any] = self._consolidate(A__ )
return column
def a_ ( self : Tuple , A__ : pa.Table ):
"""simple docstring"""
__lowerCamelCase : Any = self.numpy_arrow_extractor().extract_batch(A__ )
__lowerCamelCase : int = self.python_features_decoder.decode_batch(A__ )
__lowerCamelCase : Union[str, Any] = self.recursive_tensorize(A__ )
for column_name in batch:
__lowerCamelCase : Optional[int] = self._consolidate(batch[column_name] )
return batch
| 150
| 0
|
"""simple docstring"""
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = True
while ask_again:
__UpperCAmelCase : Dict = input(UpperCamelCase )
try:
if default is not None and len(UpperCamelCase ) == 0:
return default
return convert_value(UpperCamelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(UpperCamelCase )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=[] , UpperCamelCase=None , UpperCamelCase=0 ) -> str:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = BulletMenu(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : str = menu.run(default_choice=UpperCamelCase )
return convert_value(UpperCamelCase ) if convert_value is not None else result
def _UpperCamelCase ( UpperCamelCase ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = int(UpperCamelCase )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = int(UpperCamelCase )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _UpperCamelCase ( UpperCamelCase ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : List[Any] = int(UpperCamelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _UpperCamelCase ( UpperCamelCase ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : str = int(UpperCamelCase )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
__UpperCAmelCase : str = int(UpperCamelCase )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _UpperCamelCase ( UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class a__ ( argparse.RawDescriptionHelpFormatter ):
def a_ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = super()._format_usage(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = usage.replace("<command> [<args>] " , "")
return usage
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class a__ ( __magic_name__ ):
lowercase_ = "gpt_bigcode"
lowercase_ = ["past_key_values"]
lowercase_ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any , UpperCamelCase_ : Tuple=50257 , UpperCamelCase_ : Dict=1024 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : Any=12 , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]="gelu_pytorch_tanh" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Tuple=1e-5 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : str=50256 , UpperCamelCase_ : Union[str, Any]=50256 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Union[str, Any]=True , **UpperCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Optional[int] = n_positions
__UpperCAmelCase : Tuple = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Dict = n_head
__UpperCAmelCase : Optional[Any] = n_inner
__UpperCAmelCase : Optional[Any] = activation_function
__UpperCAmelCase : List[str] = resid_pdrop
__UpperCAmelCase : List[Any] = embd_pdrop
__UpperCAmelCase : Optional[Any] = attn_pdrop
__UpperCAmelCase : Dict = layer_norm_epsilon
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : int = scale_attn_weights
__UpperCAmelCase : Tuple = use_cache
__UpperCAmelCase : List[Any] = attention_softmax_in_fpaa
__UpperCAmelCase : Any = scale_attention_softmax_in_fpaa
__UpperCAmelCase : str = multi_query
__UpperCAmelCase : int = bos_token_id
__UpperCAmelCase : str = eos_token_id
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_)
| 487
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Union[str, Any] = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = '''altclip_text_model'''
def __init__( self , a__=250002 , a__=1024 , a__=24 , a__=16 , a__=4096 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=0.0_2 , a__=0.0_2 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=768 , **a__ , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Tuple = initializer_factor
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = position_embedding_type
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : int = project_dim
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = '''altclip_vision_model'''
def __init__( self , a__=768 , a__=3072 , a__=512 , a__=12 , a__=12 , a__=3 , a__=224 , a__=32 , a__="quick_gelu" , a__=1e-5 , a__=0.0 , a__=0.0_2 , a__=1.0 , **a__ , ):
super().__init__(**UpperCAmelCase_ )
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : Tuple = projection_dim
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = initializer_factor
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Optional[int] = hidden_act
@classmethod
def __A ( cls , a__ , **a__ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exists, we use it for backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes
        # a lot of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same
        # in most cases, but we don't want to break anything regarding `_config_dict` that existed before commit
        # `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but differ.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different "
                            f'values. The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. "
                            f'The value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but differ.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize "
                            f'`AltCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
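# --- Usage sketch (added for illustration; not part of the original file) ---
# Builds a composite config and round-trips it through to_dict(). It assumes the
# AltCLIPTextConfig / AltCLIPVisionConfig classes defined earlier in this module
# are importable; `projection_dim=512` is an arbitrary example value.
if __name__ == "__main__":
    config = AltCLIPConfig(projection_dim=512)
    serialized = config.to_dict()
    assert serialized["projection_dim"] == 512
    assert serialized["model_type"] == "altclip"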
| 213
|
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
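# --- How to run (illustrative note; not part of the original file) ---
# The fast test above uses tiny random models on CPU. Because of the relative
# import of test_pipelines_common, it is meant to be invoked through pytest from
# inside the diffusers test suite, e.g. (the exact path is an assumption):
#   pytest tests/pipelines/kandinsky_v22 -k test_kandinsky
# The @slow integration test additionally requires a CUDA GPU and downloads the
# full Kandinsky 2.2 prior and decoder checkpoints.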
| 62
| 0
|
"""simple docstring"""
A__ : List[Any]= range(2, 20 + 1)
A__ : List[str]= [10**k for k in range(ks[-1] + 1)]
A__ : dict[int, dict[int, list[list[int]]]]= {}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) )
UpperCamelCase__ = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) )
UpperCamelCase__ , UpperCamelCase__ = 0, 0
UpperCamelCase__ = n - i
UpperCamelCase__ = memo.get(SCREAMING_SNAKE_CASE )
if sub_memo is not None:
UpperCamelCase__ = sub_memo.get(SCREAMING_SNAKE_CASE )
if jumps is not None and len(SCREAMING_SNAKE_CASE ) > 0:
# find and make the largest jump without going over
UpperCamelCase__ = -1
for _k in range(len(SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCamelCase__ = _k
break
if max_jump >= 0:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCamelCase__ = diff + c
for j in range(min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ):
UpperCamelCase__ , UpperCamelCase__ = divmod(SCREAMING_SNAKE_CASE , 10 )
if new_c > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = []
else:
UpperCamelCase__ = {c: []}
UpperCamelCase__ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCamelCase__ , UpperCamelCase__ = next_term(SCREAMING_SNAKE_CASE , k - 1 , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCamelCase__ , UpperCamelCase__ = compute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
UpperCamelCase__ = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCamelCase__ = 0
while j < len(SCREAMING_SNAKE_CASE ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(SCREAMING_SNAKE_CASE , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(SCREAMING_SNAKE_CASE ):
a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCamelCase__ = i
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0, 0, 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCamelCase__ = ds_c + ds_b
diff += addend
UpperCamelCase__ = 0
for j in range(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = a_i[j] + addend
UpperCamelCase__ , UpperCamelCase__ = divmod(SCREAMING_SNAKE_CASE , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return diff, i - start_i
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = digits[j] + addend
if s >= 10:
UpperCamelCase__ , UpperCamelCase__ = divmod(SCREAMING_SNAKE_CASE , 10 )
UpperCamelCase__ = addend // 10 + quotient
else:
UpperCamelCase__ = s
UpperCamelCase__ = addend // 10
if addend == 0:
break
while addend > 0:
UpperCamelCase__ , UpperCamelCase__ = divmod(SCREAMING_SNAKE_CASE , 10 )
digits.append(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 10**15 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [1]
UpperCamelCase__ = 1
UpperCamelCase__ = 0
while True:
UpperCamelCase__ , UpperCamelCase__ = next_term(SCREAMING_SNAKE_CASE , 20 , i + dn , SCREAMING_SNAKE_CASE )
dn += terms_jumped
if dn == n - i:
break
UpperCamelCase__ = 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 703
|
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
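# --- Usage sketch (added for illustration; not part of the original module) ---
# pt_to_pil expects a batched (N, C, H, W) torch tensor in [-1, 1], the value
# range diffusion pipelines produce; torch is assumed to be installed.
if __name__ == "__main__":
    import torch

    batch = torch.rand(2, 3, 8, 8) * 2 - 1  # random images in [-1, 1]
    pil_images = pt_to_pil(batch)
    print([image.size for image in pil_images])  # [(8, 8), (8, 8)]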
| 20
| 0
|
"""simple docstring"""
def snake_case ( UpperCamelCase__ : int ) -> int:
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def snake_case ( UpperCamelCase__ : int ) -> bool:
lowerCamelCase : List[Any] = 0
lowerCamelCase : List[Any] = number
while duplicate > 0:
lowerCamelCase , lowerCamelCase : List[Any] = divmod(UpperCamelCase__ , 10 )
fact_sum += factorial(UpperCamelCase__ )
return fact_sum == number
if __name__ == "__main__":
print('Program to check whether a number is a Krisnamurthy Number or not.')
__lowerCamelCase :Optional[int] = int(input('Enter number: ').strip())
print(
f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
)
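# --- Quick self-check (added for illustration; not part of the original) ---
# The base-10 factorions are 1, 2, 145 and 40585; e.g. 145 = 1! + 4! + 5!.
def _self_check() -> None:
    assert krishnamurthy(145)
    assert krishnamurthy(40585)
    assert not krishnamurthy(10)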
| 222
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def snake_case ( UpperCamelCase__ : Any ) -> Dict:
if "cls_token" in name:
lowerCamelCase : str = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
lowerCamelCase : int = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
lowerCamelCase : str = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase : List[str] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCamelCase : Optional[int] = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCamelCase : Tuple = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
lowerCamelCase : Union[str, Any] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
lowerCamelCase : Tuple = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
lowerCamelCase : Optional[Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCamelCase : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCamelCase : Optional[int] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCamelCase : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
lowerCamelCase : List[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
lowerCamelCase : List[Any] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
lowerCamelCase : str = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
lowerCamelCase : List[Any] = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
lowerCamelCase : str = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
lowerCamelCase : Any = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
lowerCamelCase : int = key.split(""".""" )
lowerCamelCase : Dict = int(key_split[1] )
if "decoder_blocks" in key:
lowerCamelCase : List[str] = config.decoder_hidden_size
lowerCamelCase : str = """decoder.decoder_layers."""
if "weight" in key:
lowerCamelCase : Dict = val[:dim, :]
lowerCamelCase : Optional[Any] = val[dim : dim * 2, :]
lowerCamelCase : Tuple = val[-dim:, :]
elif "bias" in key:
lowerCamelCase : Optional[Any] = val[:dim]
lowerCamelCase : Optional[Any] = val[dim : dim * 2]
lowerCamelCase : Optional[Any] = val[-dim:]
else:
lowerCamelCase : Optional[int] = config.hidden_size
lowerCamelCase : Tuple = """vit.encoder.layer."""
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Union[str, Any] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
elif "bias" in key:
lowerCamelCase : Any = val[:dim]
lowerCamelCase : Union[str, Any] = val[dim : dim * 2]
lowerCamelCase : Optional[Any] = val[-dim:]
else:
lowerCamelCase : Dict = val
return orig_state_dict
def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ) -> Any:
lowerCamelCase : Optional[int] = ViTMAEConfig()
if "large" in checkpoint_url:
lowerCamelCase : List[Any] = 1024
lowerCamelCase : Union[str, Any] = 4096
lowerCamelCase : Dict = 24
lowerCamelCase : int = 16
elif "huge" in checkpoint_url:
lowerCamelCase : Dict = 14
lowerCamelCase : int = 1280
lowerCamelCase : Any = 5120
lowerCamelCase : int = 32
lowerCamelCase : Dict = 16
lowerCamelCase : str = ViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )["""model"""]
lowerCamelCase : Optional[int] = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase : int = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
lowerCamelCase : Tuple = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
lowerCamelCase : Union[str, Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
lowerCamelCase : str = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase : Tuple = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase : Any = model(**UpperCamelCase__ )
lowerCamelCase : int = outputs.logits
if "large" in checkpoint_url:
lowerCamelCase : List[Any] = torch.tensor(
[[-0.7_3_0_9, -0.7_1_2_8, -1.0_1_6_9], [-1.0_1_6_1, -0.9_0_5_8, -1.1_8_7_8], [-1.0_4_7_8, -0.9_4_1_1, -1.1_9_1_1]] )
elif "huge" in checkpoint_url:
lowerCamelCase : Union[str, Any] = torch.tensor(
[[-1.1_5_9_9, -0.9_1_9_9, -1.2_2_2_1], [-1.1_9_5_2, -0.9_2_6_9, -1.2_3_0_7], [-1.2_1_4_3, -0.9_3_3_7, -1.2_2_6_2]] )
else:
lowerCamelCase : int = torch.tensor(
[[-0.9_1_9_2, -0.8_4_8_1, -1.1_2_5_9], [-1.1_3_4_9, -1.0_0_3_4, -1.2_5_9_9], [-1.1_7_5_7, -1.0_4_2_9, -1.2_7_2_6]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
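# --- rename_key sanity check (added for illustration; not part of the script) ---
# Shows how a raw MAE checkpoint key is mapped onto the HF module hierarchy by
# the sequential replacements above.
def _rename_key_examples() -> None:
    assert rename_key("cls_token") == "vit.embeddings.cls_token"
    assert rename_key("blocks.0.attn.proj.weight") == "vit.encoder.layer.0.attention.output.dense.weight"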
| 222
| 1
|
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) until the score goes below the
    masking threshold, based on the head importance scores.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove the masked head weights entirely) and compare the
    score and timing before and after pruning.
    """
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3"
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
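# --- entropy() sanity check (added for illustration; not part of the script) ---
# A uniform distribution over 4 outcomes has the maximum entropy log(4); a
# one-hot distribution has entropy 0 (the plogp[p == 0] = 0 line handles the
# 0 * log(0) case).
def _entropy_demo() -> None:
    uniform = torch.full((4,), 0.25)
    peaked = torch.tensor([1.0, 0.0, 0.0, 0.0])
    assert torch.isclose(entropy(uniform), torch.log(torch.tensor(4.0)))
    assert entropy(peaked).item() == 0.0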
| 149
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
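# --- Note on the lazy-module pattern (illustrative; not part of the original) ---
# At type-checking time the tokenizer class is imported directly; at runtime the
# module object is replaced with a _LazyModule, so the heavy import cost is only
# paid on first attribute access, e.g.:
#   from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer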
| 149
| 1
|
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
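# --- Equivalence sketch (added for illustration; not part of the test) ---
# Both schedulers share the same alphas_cumprod for identical beta settings, so
# add_noise() produces identical noisy samples. The helper below spells out the
# underlying forward-diffusion formula for a single integer timestep t:
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
def _add_noise_formula(scheduler, x0, noise, t):
    alpha_bar = scheduler.alphas_cumprod[t]
    return alpha_bar.sqrt() * x0 + (1 - alpha_bar).sqrt() * noise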
| 611
|
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
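# --- Example invocation (script name and paths are illustrative) ---
# python convert_hifigan.py \
#     --checkpoint_path ./generator.ckpt \
#     --stats_path ./stats.npy \
#     --pytorch_dump_folder_path ./speecht5-hifigan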
| 611
| 1
|
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected unweighted graph for running the Markov chain algorithm.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """
    Run the Markov chain for the given number of steps and count how many times
    each node is visited.
    """
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
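# --- Usage sketch (the transition table is illustrative; not in the original) ---
def _demo() -> None:
    transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    visits = get_transitions("a", transitions, 1000)
    print(visits.most_common())  # "a" should dominate given its 0.9 self-loop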
| 707
|
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
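# --- Quick checks (added for illustration; not part of the original) ---
def _examples() -> None:
    assert is_balanced("([]{})")
    assert not is_balanced("([)]")  # interleaved brackets are rejected
    assert is_balanced("")  # the empty string is vacuously balanced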
| 201
| 0
|