| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all prime numbers up to and including num (sieve of Eratosthenes)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
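A quick illustrative check of the cleaned-up prime_sieve above (the assertions are an editorial addition, not part of the original snippet):

# Sanity check, assuming prime_sieve as defined above.
assert prime_sieve(2) == [2]
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]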
code_codestyle: 349
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( a_ , unittest.TestCase ):
__UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline
__UpperCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__UpperCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__UpperCAmelCase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__UpperCAmelCase = False
@property
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def __snake_case ( self : Dict ):
'''simple docstring'''
return 32
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
return 100
@property
def __snake_case ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : List[str] ={
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case : Optional[Any] =UNetaDConditionModel(**_snake_case )
return model
@property
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def __snake_case ( self : Any ):
'''simple docstring'''
snake_case : str =self.dummy_unet
snake_case : str =self.dummy_movq
snake_case : int ={
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
snake_case : Optional[int] =DDIMScheduler(**_snake_case )
snake_case : Union[str, Any] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __snake_case ( self : List[str], _snake_case : int, _snake_case : Dict=0 ):
'''simple docstring'''
snake_case : str =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case : str =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
_snake_case )
# create init_image
snake_case : Union[str, Any] =floats_tensor((1, 3, 64, 64), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case : Optional[int] =image.cpu().permute(0, 2, 3, 1 )[0]
snake_case : int =Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
snake_case : int =floats_tensor((1, 3, 64, 64), rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith('''mps''' ):
snake_case : Dict =torch.manual_seed(_snake_case )
else:
snake_case : Union[str, Any] =torch.Generator(device=_snake_case ).manual_seed(_snake_case )
snake_case : Tuple ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : int ='''cpu'''
snake_case : Tuple =self.get_dummy_components()
snake_case : Optional[int] =self.pipeline_class(**_snake_case )
snake_case : Any =pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case : Optional[int] =pipe(**self.get_dummy_inputs(_snake_case ) )
snake_case : Tuple =output.images
snake_case : Optional[int] =pipe(
**self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0]
snake_case : Union[str, Any] =image[0, -3:, -3:, -1]
snake_case : Dict =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : str =np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : int ):
'''simple docstring'''
snake_case : Union[str, Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
snake_case : List[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
snake_case : List[Any] =init_image.resize((512, 512) )
snake_case : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
snake_case : List[Any] =torch.from_numpy(np.array(_snake_case ) ).float() / 255.0
snake_case : Optional[Any] =hint.permute(2, 0, 1 ).unsqueeze(0 )
snake_case : Any ='''A robot, 4k photo'''
snake_case : List[Any] =KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
snake_case : int =KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.floataa )
snake_case : List[str] =pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
snake_case : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case , snake_case : List[str] =pipe_prior(
_snake_case, image=_snake_case, strength=0.85, generator=_snake_case, negative_prompt='''''', ).to_tuple()
snake_case : List[str] =pipeline(
image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, hint=_snake_case, generator=_snake_case, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='''np''', )
snake_case : List[str] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_snake_case, _snake_case )
style_context_codestyle: 349
label: 1
def sum_of_digits(n: int) -> int:
    """Iteratively sum the digits of n."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the digits of n."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the digits of n by converting it to a string."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations on a few large values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
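An illustrative consistency check for the three digit-sum variants above (an editorial addition, not part of the original module):

# All three implementations should agree on a few sample values.
for sample in (0, 7, 12_345, -9_876):
    assert sum_of_digits(sample) == sum_of_digits_recursion(sample) == sum_of_digits_compact(sample)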
code_codestyle: 715
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num factorial."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
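For reference, a small illustrative check of solution (not in the original file): 10! = 3,628,800, whose digits sum to 27.

assert solution(10) == 27  # 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27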
style_context_codestyle: 530
label: 0
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_lilt"] = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 409
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: List[str] =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =-1
SCREAMING_SNAKE_CASE_: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Any =TextStreamer(lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Tuple =cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: int =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =-1
SCREAMING_SNAKE_CASE_: List[Any] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE_: int =TextIteratorStreamer(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE_: Union[str, Any] =Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
SCREAMING_SNAKE_CASE_: Optional[int] =""""""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: str =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =-1
SCREAMING_SNAKE_CASE_: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: str =TextStreamer(lowerCAmelCase , skip_prompt=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Dict =cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""distilgpt2""" )
SCREAMING_SNAKE_CASE_: Dict =AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =-1
SCREAMING_SNAKE_CASE_: str =torch.ones((1, 5) , device=lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: List[str] =TextStreamer(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=1 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE_: Any =cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE_: Any =tokenizer(lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: Tuple =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =-1
SCREAMING_SNAKE_CASE_: Union[str, Any] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =TextIteratorStreamer(lowerCAmelCase , timeout=0.0_0_1 )
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE_: Dict =Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =""""""
for new_text in streamer:
streamer_text += new_text
style_context_codestyle: 409
label: 1
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __a ( unittest.TestCase ):
def UpperCamelCase ( self : int , snake_case_ : List[str])-> List[Any]:
__lowerCAmelCase =3
__lowerCAmelCase =2_50
__lowerCAmelCase =ids_tensor((batch_size, length) , snake_case_)
__lowerCAmelCase =torch.ones((batch_size, length) , device=snake_case_ , dtype=torch.float) / length
return input_ids, scores
def UpperCamelCase ( self : int)-> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(5)
__lowerCAmelCase =StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10),
MaxTimeCriteria(max_time=0.1),
])
self.assertFalse(criteria(snake_case_ , snake_case_))
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(9)
self.assertFalse(criteria(snake_case_ , snake_case_))
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(10)
self.assertTrue(criteria(snake_case_ , snake_case_))
def UpperCamelCase ( self : int)-> Union[str, Any]:
__lowerCAmelCase =MaxLengthCriteria(max_length=10)
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(5)
self.assertFalse(criteria(snake_case_ , snake_case_))
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(9)
self.assertFalse(criteria(snake_case_ , snake_case_))
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(10)
self.assertTrue(criteria(snake_case_ , snake_case_))
def UpperCamelCase ( self : Dict)-> Any:
__lowerCAmelCase =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5)
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(5)
self.assertFalse(criteria(snake_case_ , snake_case_))
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(9)
self.assertFalse(criteria(snake_case_ , snake_case_))
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(10)
self.assertTrue(criteria(snake_case_ , snake_case_))
__lowerCAmelCase =StoppingCriteriaList([criteria])
self.assertEqual(criteria_list.max_length , 10)
def UpperCamelCase ( self : Optional[int])-> Dict:
__lowerCAmelCase , __lowerCAmelCase =self._get_tensors(5)
__lowerCAmelCase =MaxTimeCriteria(max_time=0.1)
self.assertFalse(criteria(snake_case_ , snake_case_))
__lowerCAmelCase =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2)
self.assertTrue(criteria(snake_case_ , snake_case_))
def UpperCamelCase ( self : Union[str, Any])-> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 10)
with self.assertWarns(snake_case_):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 11)
__lowerCAmelCase =validate_stopping_criteria(StoppingCriteriaList() , 11)
self.assertEqual(len(snake_case_) , 1)
code_codestyle: 707
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The first {n} Hamming numbers are: {hamming_numbers}")
    print("-----------------------------------------------------")
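A brief illustrative check of hamming (an editorial addition, not in the original script): the first ten Hamming numbers.

assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]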
style_context_codestyle: 456
label: 0
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 296
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # integer division keeps n an int
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
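A worked example of the same identity with small numbers (illustrative, not part of the original file): for a prime p that does not divide b, Fermat's little theorem makes b ** (p - 2) the modular inverse of b.

# The inverse of 3 modulo 7 is 5, since 3 * 5 = 15 ≡ 1 (mod 7).
assert binary_exponentiation(3, 7 - 2, 7) == 5
assert (3 * binary_exponentiation(3, 7 - 2, 7)) % 7 == 1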
style_context_codestyle: 296
label: 1
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise a PyTorch model from the BigBird configuration
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
code_codestyle: 157
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: parameter_vector[0] plus the weighted sum of the input features."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value of the given example from the train or test set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value of the given example from the train or test set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of the cost-derivative terms over the first `end` training examples."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Average cost derivative for the parameter at `index` (-1 for the bias term)."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
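A small worked example of the hypothesis function above, using the initial parameter_vector [2, 4, 1, 5] before any training (illustrative, not part of the original script):

# _hypothesis_value((5, 2, 3)) == 2 + 4*5 + 1*2 + 5*3 == 39,
# so the first training example ((5, 2, 3), 15) initially has an error of 39 - 15 = 24.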
style_context_codestyle: 157
label: 1
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : int = 101 ):
SCREAMING_SNAKE_CASE_ = length
def __len__( self : str ):
return self.length
def __getitem__( self : Dict , _lowerCAmelCase : str ):
return i
class lowerCamelCase_ :
'''simple docstring'''
def __call__( self : List[str] , _lowerCAmelCase : Optional[int] ):
return {"input_ids": torch.tensor(_lowerCAmelCase ), "labels": torch.tensor(_lowerCAmelCase )}
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
SCREAMING_SNAKE_CASE_ = nn.Linear(120 , 80 )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : int=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch_neuroncore
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = F"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"--output_dir {output_dir}".split()
SCREAMING_SNAKE_CASE_ = ['torchrun'] + distributed_args + args
execute_subprocess_async(_lowerCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch_multi_gpu
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = F"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"--output_dir {output_dir}".split()
SCREAMING_SNAKE_CASE_ = ['torchrun'] + distributed_args + args
execute_subprocess_async(_lowerCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCamelCase__ : int = HfArgumentParser((TrainingArguments,))
lowerCamelCase__ : Optional[int] = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
lowerCamelCase__ : str = DummyDataset(dataset_length)
def UpperCAmelCase_ ( __UpperCAmelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE_ = list(range(len(__UpperCAmelCase ) ) )
SCREAMING_SNAKE_CASE_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
lowerCamelCase__ : Tuple = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCamelCase__ : List[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase__ : Dict = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase__ : List[str] = 2
lowerCamelCase__ : Tuple = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase__ : int = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase__ : Union[str, Any] = None
code_codestyle: 31
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _lowerCAmelCase ( A__ ):
lowercase__ = SwinConfig(image_size=192 )
if "base" in model_name:
lowercase__ = 6
lowercase__ = 128
lowercase__ = (2, 2, 18, 2)
lowercase__ = (4, 8, 16, 32)
elif "large" in model_name:
lowercase__ = 12
lowercase__ = 192
lowercase__ = (2, 2, 18, 2)
lowercase__ = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
lowercase__ = window_size
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = num_heads
return config
def _lowerCAmelCase ( A__ ):
if "encoder.mask_token" in name:
lowercase__ = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
lowercase__ = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
lowercase__ = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
if "attn.proj" in name:
lowercase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
lowercase__ = 'layernorm.weight'
if name == "encoder.norm.bias":
lowercase__ = 'layernorm.bias'
if "decoder" in name:
pass
else:
lowercase__ = 'swin.' + name
return name
def _lowerCAmelCase ( A__ , A__ ):
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(A__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowercase__ = key.split('.' )
lowercase__ = int(key_split[2] )
lowercase__ = int(key_split[4] )
lowercase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[
dim : dim * 2, :
]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[
:dim
]
lowercase__ = val[
dim : dim * 2
]
lowercase__ = val[
-dim:
]
else:
lowercase__ = val
return orig_state_dict
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = torch.load(A__ , map_location='cpu' )['model']
lowercase__ = get_swin_config(A__ )
lowercase__ = SwinForMaskedImageModeling(A__ )
model.eval()
lowercase__ = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = ViTImageProcessor(size={'height': 192, 'width': 192} )
lowercase__ = Image.open(requests.get(A__ , stream=A__ ).raw )
lowercase__ = image_processor(images=A__ , return_tensors='pt' )
with torch.no_grad():
lowercase__ = model(**A__ ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a__ : Optional[int] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 622
label: 0
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = passable cell) using Dijkstra's algorithm."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
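An illustrative call of dijkstra on a tiny all-open grid (an editorial addition, not part of the original module):

example_grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
distance, path = dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False)
# Without diagonal moves the shortest path length equals the Manhattan distance, 4.
assert distance == 4 and path[0] == (0, 0) and path[-1] == (2, 2)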
code_codestyle: 170
from typing import Dict, Optional
import numpy as np
import datasets
_snake_case = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_snake_case = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_snake_case = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = False , ) -> List[str]:
if label_map is not None:
for old_id, new_id in label_map.items():
UpperCamelCase = new_id
# turn into Numpy arrays
UpperCamelCase = np.array(_lowercase )
UpperCamelCase = np.array(_lowercase )
if reduce_labels:
UpperCamelCase = 255
UpperCamelCase = label - 1
UpperCamelCase = 255
UpperCamelCase = label != ignore_index
UpperCamelCase = np.not_equal(_lowercase , _lowercase )
UpperCamelCase = pred_label[mask]
UpperCamelCase = np.array(_lowercase )[mask]
UpperCamelCase = pred_label[pred_label == label]
UpperCamelCase = np.histogram(_lowercase , bins=_lowercase , range=(0, num_labels - 1) )[0]
UpperCamelCase = np.histogram(_lowercase , bins=_lowercase , range=(0, num_labels - 1) )[0]
UpperCamelCase = np.histogram(_lowercase , bins=_lowercase , range=(0, num_labels - 1) )[0]
UpperCamelCase = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = False , ) -> Optional[Any]:
UpperCamelCase = np.zeros((num_labels,) , dtype=np.floataa )
UpperCamelCase = np.zeros((num_labels,) , dtype=np.floataa )
UpperCamelCase = np.zeros((num_labels,) , dtype=np.floataa )
UpperCamelCase = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(_lowercase , _lowercase ):
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = intersect_and_union(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> List[Any]:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = total_intersect_and_union(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# compute metrics
UpperCamelCase = {}
UpperCamelCase = total_area_intersect.sum() / total_area_label.sum()
UpperCamelCase = total_area_intersect / total_area_union
UpperCamelCase = total_area_intersect / total_area_label
UpperCamelCase = np.nanmean(_lowercase )
UpperCamelCase = np.nanmean(_lowercase )
UpperCamelCase = all_acc
UpperCamelCase = iou
UpperCamelCase = acc
if nan_to_num is not None:
UpperCamelCase = {metric: np.nan_to_num(_lowercase , nan=_lowercase ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[int, int]] = None , SCREAMING_SNAKE_CASE__ : bool = False , ):
"""simple docstring"""
UpperCamelCase = mean_iou(
results=SCREAMING_SNAKE_CASE__ , gt_seg_maps=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , ignore_index=SCREAMING_SNAKE_CASE__ , nan_to_num=SCREAMING_SNAKE_CASE__ , label_map=SCREAMING_SNAKE_CASE__ , reduce_labels=SCREAMING_SNAKE_CASE__ , )
return iou_result
style_context_codestyle: 170
label: 1
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
code_codestyle: 291
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowercase = logging.getLogger(__name__)
lowercase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A:
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCAmelCase )} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
SCREAMING_SNAKE_CASE = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def lowercase__ ( self : str ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class __A:
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase , metadata={'''help''': '''The input training data file (a text file).'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
SCREAMING_SNAKE_CASE = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def lowercase__ ( self : Tuple ):
if self.train_file is not None:
lowerCamelCase_ = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCamelCase_ = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __lowerCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase_ = [json.loads(UpperCAmelCase__ ) for line in f.read().splitlines() if (len(UpperCAmelCase__ ) > 0 and not line.isspace())]
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
lowerCamelCase_ = {c: dataset[c] for c in dataset.column_names}
lowerCamelCase_ = refs
return Dataset.from_dict(UpperCAmelCase__ )
def __lowerCAmelCase ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowerCamelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
lowerCamelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
lowerCamelCase_ = {}
if data_args.train_file is not None:
lowerCamelCase_ = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ = data_args.validation_file
lowerCamelCase_ = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
lowerCamelCase_ = """text"""
lowerCamelCase_ = load_dataset(UpperCAmelCase__ , data_files=UpperCAmelCase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowerCamelCase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
lowerCamelCase_ = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowerCamelCase_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
lowerCamelCase_ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowerCamelCase_ = AutoModelForMaskedLM.from_config(UpperCAmelCase__ )
model.resize_token_embeddings(len(UpperCAmelCase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ = datasets["""train"""].column_names
else:
lowerCamelCase_ = datasets["""validation"""].column_names
lowerCamelCase_ = """text""" if """text""" in column_names else column_names[0]
lowerCamelCase_ = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(UpperCAmelCase__ : Any ):
# Remove empty lines
lowerCamelCase_ = [line for line in examples["""text"""] if len(UpperCAmelCase__ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=data_args.max_seq_length )
lowerCamelCase_ = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowerCamelCase_ = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing those columns from the dataset
lowerCamelCase_ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ = DataCollatorForWholeWordMask(tokenizer=UpperCAmelCase__ , mlm_probability=data_args.mlm_probability )
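    # Whole-word masking selects whole words and masks every one of their sub-tokens together,
    # instead of sampling sub-tokens independently as plain MLM masking does.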
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowerCamelCase_ = model_args.model_name_or_path
else:
lowerCamelCase_ = None
lowerCamelCase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
            # Trainer.save_model only writes the model and tokenizer; the trainer state has to be saved separately
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
lowerCamelCase_ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCamelCase_ = trainer.evaluate()
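        # Perplexity is the exponential of the mean eval cross-entropy loss; lower is better.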
lowerCamelCase_ = math.exp(eval_output["""eval_loss"""] )
lowerCamelCase_ = perplexity
lowerCamelCase_ = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def __lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 272
| 0
|
'''simple docstring'''
from __future__ import annotations
__UpperCamelCase : List[Any] = list[tuple[int, int]]
__UpperCamelCase : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCamelCase : Union[str, Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) -> Tuple:
_A : Optional[int] = pos_x
_A : Any = pos_y
_A : List[Any] = (pos_y, pos_x)
_A : str = goal_x
_A : List[str] = goal_y
_A : Union[str, Any] = g_cost
_A : int = parent
_A : int = self.calculate_heuristic()
def _lowerCamelCase ( self ) -> float:
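        # Manhattan distance to the goal, a standard admissible heuristic on a 4-connected grid.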
_A : Any = abs(self.pos_x - self.goal_x )
_A : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , UpperCAmelCase__ ) -> bool:
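        # Nodes are ordered by their f_cost so that sorting the open list brings the most promising node to the front.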
return self.f_cost < other.f_cost
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
_A : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCAmelCase__ )
_A : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , UpperCAmelCase__ )
_A : List[Any] = [self.start]
_A : list[Node] = []
_A : str = False
def _lowerCamelCase ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_A : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_A : List[str] = True
return self.retrace_path(UpperCAmelCase__ )
self.closed_nodes.append(UpperCAmelCase__ )
_A : List[str] = self.get_successors(UpperCAmelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCAmelCase__ )
else:
# retrieve the best current path
_A : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(UpperCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCAmelCase__ )
else:
self.open_nodes.append(UpperCAmelCase__ )
if not self.reached:
return [self.start.pos]
return None
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> list[Node]:
_A : str = []
for action in delta:
_A : Optional[Any] = parent.pos_x + action[1]
_A : Optional[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCAmelCase__ , UpperCAmelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCAmelCase__ , ) )
return successors
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Path:
_A : Any = node
_A : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_A : Optional[int] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = (0, 0)
__UpperCamelCase : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__UpperCamelCase : Tuple = GreedyBestFirst(init, goal)
__UpperCamelCase : int = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__UpperCamelCase : int = 2
for elem in grid:
print(elem)
| 417
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase__ ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = """ssube/stable-diffusion-x4-upscaler-onnx"""
def _lowerCamelCase ( self , UpperCAmelCase__=0 ) -> Any:
_A : Tuple = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(UpperCAmelCase__ ) )
_A : Union[str, Any] = torch.manual_seed(UpperCAmelCase__ )
_A : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self ) -> int:
_A : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_A : Tuple = self.get_dummy_inputs()
_A : Union[str, Any] = pipe(**UpperCAmelCase__ ).images
_A : List[str] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_A : int = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ) -> List[str]:
_A : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_A : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_A : Union[str, Any] = self.get_dummy_inputs()
_A : Union[str, Any] = pipe(**UpperCAmelCase__ ).images
_A : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_A : Dict = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_A : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_A : Dict = self.get_dummy_inputs()
_A : Tuple = pipe(**UpperCAmelCase__ ).images
_A : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_A : Tuple = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ) -> List[Any]:
_A : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_A : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_A : Tuple = self.get_dummy_inputs()
_A : Optional[int] = pipe(**UpperCAmelCase__ ).images
_A : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_A : List[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ) -> Dict:
_A : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_A : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_A : Dict = self.get_dummy_inputs()
_A : Union[str, Any] = pipe(**UpperCAmelCase__ ).images
_A : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_A : Tuple = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowerCamelCase ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self ) -> Optional[int]:
_A : int = ort.SessionOptions()
_A : Tuple = False
return options
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_A : Optional[Any] = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
_A : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_A : Optional[int] = '''A fantasy landscape, trending on artstation'''
_A : Union[str, Any] = torch.manual_seed(0 )
_A : List[str] = pipe(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=UpperCAmelCase__ , output_type='''np''' , )
_A : Tuple = output.images
_A : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_A : Optional[int] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_A : Optional[Any] = init_image.resize((1_2_8, 1_2_8) )
_A : Dict = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
_A : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=UpperCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_A : Optional[Any] = '''A fantasy landscape, trending on artstation'''
_A : Any = torch.manual_seed(0 )
_A : List[Any] = pipe(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=UpperCAmelCase__ , output_type='''np''' , )
_A : Optional[int] = output.images
_A : Optional[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_A : str = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 417
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( UpperCAmelCase_ ):
'''simple docstring'''
__UpperCamelCase = '''rwkv'''
__UpperCamelCase = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : List[str] , __lowerCamelCase : Tuple=50_277 , __lowerCamelCase : List[str]=1_024 , __lowerCamelCase : List[Any]=4_096 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]=1E-5 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : int=0 , __lowerCamelCase : Dict=6 , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=True , **__lowerCamelCase : Optional[int] , ) -> List[Any]:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = context_length
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = attention_hidden_size if attention_hidden_size is not None else hidden_size
__lowercase = intermediate_size if intermediate_size is not None else 4 * hidden_size
__lowercase = layer_norm_epsilon
__lowercase = rescale_every
__lowercase = use_cache
__lowercase = bos_token_id
__lowercase = eos_token_id
super().__init__(
tie_word_embeddings=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
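# A minimal usage sketch (hedged: this assumes the upstream `transformers` aliases of this class,
# `RwkvConfig`/`RwkvModel`, are what consumers actually import):
#   from transformers import RwkvConfig, RwkvModel
#   config = RwkvConfig(context_length=2048, hidden_size=512, num_hidden_layers=8)
#   model = RwkvModel(config)
# The keyword names mirror the __init__ arguments defined above.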
| 375
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
SCREAMING_SNAKE_CASE_ : str = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def SCREAMING_SNAKE_CASE ( snake_case ) -> Tuple:
__lowercase = {}
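    # Drop the normalisation buffers and rename every checkpoint key to the Hugging Face
    # scheme in KEYS_TO_MODIFY_MAPPING; hypernetwork MLP layers are re-indexed by the regex below.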
state_dict.pop('pixel_mean' , snake_case )
state_dict.pop('pixel_std' , snake_case )
__lowercase = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowercase = key.replace(snake_case , snake_case )
if re.match(snake_case , snake_case ):
__lowercase = int(re.match(snake_case , snake_case ).group(2 ) )
if layer_nb == 0:
__lowercase = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
__lowercase = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
__lowercase = key.replace('layers.2' , 'proj_out' )
__lowercase = value
__lowercase = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case="ybelkada/segment-anything" ) -> int:
__lowercase = hf_hub_download(snake_case , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
__lowercase = SamConfig()
elif "sam_vit_l" in model_name:
__lowercase = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__lowercase = SamConfig(
vision_config=snake_case , )
elif "sam_vit_h" in model_name:
__lowercase = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__lowercase = SamConfig(
vision_config=snake_case , )
__lowercase = torch.load(snake_case , map_location='cpu' )
__lowercase = replace_keys(snake_case )
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(image_processor=snake_case )
__lowercase = SamModel(snake_case )
hf_model.load_state_dict(snake_case )
__lowercase = hf_model.to('cuda' )
__lowercase = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
__lowercase = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('RGB' )
__lowercase = [[[400, 650]]]
__lowercase = [[1]]
__lowercase = processor(images=np.array(snake_case ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
__lowercase = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
__lowercase = ((75, 275, 1_725, 850),)
__lowercase = processor(images=np.array(snake_case ) , input_boxes=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
__lowercase = [[[400, 650], [800, 650]]]
__lowercase = [[1, 1]]
__lowercase = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 375
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __lowerCAmelCase ( lowerCAmelCase):
_a = '''xlnet'''
_a = ['''mems''']
_a = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self: str , _lowerCAmelCase: int=3_20_00 , _lowerCAmelCase: str=10_24 , _lowerCAmelCase: Dict=24 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Union[str, Any]=40_96 , _lowerCAmelCase: str="gelu" , _lowerCAmelCase: Union[str, Any]=True , _lowerCAmelCase: Optional[int]="bi" , _lowerCAmelCase: Dict=0.02 , _lowerCAmelCase: str=1e-1_2 , _lowerCAmelCase: Optional[Any]=0.1 , _lowerCAmelCase: Any=5_12 , _lowerCAmelCase: int=None , _lowerCAmelCase: Optional[int]=True , _lowerCAmelCase: Optional[Any]=False , _lowerCAmelCase: Optional[Any]=False , _lowerCAmelCase: List[Any]=-1 , _lowerCAmelCase: str=False , _lowerCAmelCase: Tuple="last" , _lowerCAmelCase: str=True , _lowerCAmelCase: Tuple="tanh" , _lowerCAmelCase: List[str]=0.1 , _lowerCAmelCase: int=5 , _lowerCAmelCase: Optional[int]=5 , _lowerCAmelCase: Dict=5 , _lowerCAmelCase: Optional[Any]=1 , _lowerCAmelCase: Tuple=2 , **_lowerCAmelCase: Optional[int] , ):
lowercase :Any = vocab_size
lowercase :List[Any] = d_model
lowercase :str = n_layer
lowercase :Dict = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
lowercase :Dict = d_model // n_head
lowercase :List[Any] = ff_activation
lowercase :str = d_inner
lowercase :Dict = untie_r
lowercase :List[str] = attn_type
lowercase :Tuple = initializer_range
lowercase :str = layer_norm_eps
lowercase :List[str] = dropout
lowercase :List[Any] = mem_len
lowercase :Dict = reuse_len
lowercase :List[str] = bi_data
lowercase :Any = clamp_len
lowercase :List[str] = same_length
lowercase :Union[str, Any] = summary_type
lowercase :str = summary_use_proj
lowercase :List[str] = summary_activation
lowercase :List[str] = summary_last_dropout
lowercase :List[str] = start_n_top
lowercase :Union[str, Any] = end_n_top
lowercase :Dict = bos_token_id
lowercase :Tuple = pad_token_id
lowercase :Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , _lowerCAmelCase , )
lowercase :Dict = kwargs["use_cache"]
lowercase :Any = use_mems_eval
lowercase :Tuple = use_mems_train
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self: Tuple ):
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: List[str] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 453
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Optional[int] = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 453
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 466
|
'''simple docstring'''
import math
def _UpperCAmelCase ( __A : int ):
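    # Segmented Sieve of Eratosthenes: sieve primes up to sqrt(n) first, then reuse them to
    # mark composites window by window so memory use stays around O(sqrt(n)).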
a_ : str = []
a_ : Tuple = 2
a_ : Optional[Any] = int(math.sqrt(__A ) ) # Size of every segment
a_ : Optional[Any] = [True] * (end + 1)
a_ : Union[str, Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(__A )
for i in range(start * start , end + 1 , __A ):
a_ : List[Any] = False
start += 1
prime += in_prime
a_ : Any = end + 1
a_ : Optional[Any] = min(2 * end , __A )
while low <= n:
a_ : Optional[Any] = [True] * (high - low + 1)
for each in in_prime:
a_ : Any = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__A , high + 1 , __A ):
a_ : Union[str, Any] = False
for j in range(len(__A ) ):
if temp[j] is True:
prime.append(j + low )
a_ : Tuple = high + 1
a_ : str = min(high + end , __A )
return prime
print(sieve(10**6))
| 466
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class snake_case ( unittest.TestCase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=4 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_attention_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_choices
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxAlbertModelTester(self )
@slow
def _lowercase (self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained('''albert-base-v2''' )
SCREAMING_SNAKE_CASE_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_flax
class snake_case ( unittest.TestCase ):
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
SCREAMING_SNAKE_CASE_ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
SCREAMING_SNAKE_CASE_ = (1, 11, 7_68)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 628
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index
| 628
| 1
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A__ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A__ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
A__ : set[int] = {ord(char) for char in VALID_CHARS}
A__ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : tuple[int, ...] ) -> str | None:
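    # XOR every ciphertext byte with the repeating three-letter key; abort as soon as a
    # decoded byte falls outside the allowed printable character set.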
__lowerCamelCase : str = ""
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : int
for keychar, cipherchar in zip(cycle(UpperCAmelCase_ ) , UpperCAmelCase_ ):
__lowerCamelCase : Union[str, Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(UpperCAmelCase_ )
return decoded
def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> list[str]:
__lowerCamelCase : list[str] = []
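    # Brute-force all 26**3 = 17576 possible lowercase three-letter keys.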
for key in product(UpperCAmelCase_ , repeat=3 ):
__lowerCamelCase : int = try_key(UpperCAmelCase_ , UpperCAmelCase_ )
if encoded is not None:
possibles.append(UpperCAmelCase_ )
return possibles
def UpperCAmelCase__ ( UpperCAmelCase_ : list[str] , UpperCAmelCase_ : str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p059_cipher.txt" ) -> int:
__lowerCamelCase : list[int]
__lowerCamelCase : list[str]
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : str = Path(UpperCAmelCase_ ).parent.joinpath(UpperCAmelCase_ ).read_text(encoding='utf-8' )
__lowerCamelCase : Tuple = [int(UpperCAmelCase_ ) for number in data.strip().split(',' )]
__lowerCamelCase : Union[str, Any] = filter_valid_chars(UpperCAmelCase_ )
for common_word in COMMON_WORDS:
__lowerCamelCase : Any = filter_common_word(UpperCAmelCase_ , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) == 1:
break
__lowerCamelCase : int = possibles[0]
return sum(ord(UpperCAmelCase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13
|
'''simple docstring'''
from PIL import Image
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Image:
def brightness(SCREAMING_SNAKE_CASE_ ) -> float:
return 128 + level + (c - 128)
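        # Algebraically this is just c + level: every channel value is shifted by the requested amount.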
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE = change_brightness(img, 1_00)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 366
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__UpperCamelCase : Any = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
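# `_LazyModule` (used at the bottom of this file) defers these heavy imports until an
# attribute is first accessed, which keeps `import transformers` cheap.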
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 458
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__UpperCamelCase : Dict = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _UpperCAmelCase ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ):
"""simple docstring"""
for attribute in key.split(""".""" ):
__lowerCamelCase : Any = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
__lowerCamelCase : List[Any] = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
__lowerCamelCase : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowerCamelCase : List[Any] = value
elif weight_type == "weight_g":
__lowerCamelCase : Tuple = value
elif weight_type == "weight_v":
__lowerCamelCase : List[Any] = value
elif weight_type == "bias":
__lowerCamelCase : Optional[int] = value
else:
__lowerCamelCase : Dict = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _UpperCAmelCase ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : Tuple = []
__lowerCamelCase : Union[str, Any] = fairseq_model.state_dict()
__lowerCamelCase : Optional[Any] = hf_model.feature_extractor
__lowerCamelCase : List[str] = hf_model.adapter
for name, value in fairseq_dict.items():
__lowerCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCamelCase : Any = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowerCamelCase : Union[str, Any] = True
if "*" in mapped_key:
__lowerCamelCase : int = name.split(UpperCAmelCase )[0].split(""".""" )[-2]
__lowerCamelCase : Dict = mapped_key.replace("""*""" , UpperCAmelCase )
if "weight_g" in name:
__lowerCamelCase : int = """weight_g"""
elif "weight_v" in name:
__lowerCamelCase : List[str] = """weight_v"""
elif "bias" in name:
__lowerCamelCase : str = """bias"""
elif "weight" in name:
__lowerCamelCase : List[str] = """weight"""
else:
__lowerCamelCase : int = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _UpperCAmelCase ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase : List[str] = full_name.split("""conv_layers.""" )[-1]
__lowerCamelCase : Tuple = name.split(""".""" )
__lowerCamelCase : Tuple = int(items[0] )
__lowerCamelCase : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowerCamelCase : Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowerCamelCase : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowerCamelCase : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowerCamelCase : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = full_name.split("""adaptor.""" )[-1]
__lowerCamelCase : Any = name.split(""".""" )
if items[1].isdigit():
__lowerCamelCase : Dict = int(items[1] )
else:
__lowerCamelCase : List[str] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
__lowerCamelCase : str = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
__lowerCamelCase : str = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
__lowerCamelCase : Optional[int] = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
__lowerCamelCase : List[str] = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
__lowerCamelCase : int = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
__lowerCamelCase : Tuple = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : List[Any] ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase : Tuple = emb.weight.shape
__lowerCamelCase : Any = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def _UpperCAmelCase ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , ):
"""simple docstring"""
__lowerCamelCase : int = WavaVecaConfig.from_pretrained(
UpperCAmelCase , add_adapter=UpperCAmelCase , adapter_stride=UpperCAmelCase , adapter_kernel_size=UpperCAmelCase , use_auth_token=UpperCAmelCase , output_hidden_size=UpperCAmelCase , )
__lowerCamelCase : Optional[int] = MBartConfig.from_pretrained(UpperCAmelCase )
# load model
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
__lowerCamelCase : Union[str, Any] = model[0].eval()
# load feature extractor
__lowerCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase , use_auth_token=UpperCAmelCase )
# set weights for wav2vec2 encoder
__lowerCamelCase : Tuple = WavaVecaModel(UpperCAmelCase )
recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase )
# load decoder weights
__lowerCamelCase : Dict = MBartForCausalLM(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowerCamelCase : List[Any] = SpeechEncoderDecoderModel(encoder=UpperCAmelCase , decoder=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : List[str] = MBartaaTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : int = hf_wavavec.config.to_dict()
__lowerCamelCase : str = tokenizer.pad_token_id
__lowerCamelCase : Optional[Any] = tokenizer.bos_token_id
__lowerCamelCase : Dict = tokenizer.eos_token_id
__lowerCamelCase : Tuple = """mbart50"""
__lowerCamelCase : List[str] = """wav2vec2"""
__lowerCamelCase : List[str] = tokenizer.eos_token_id
__lowerCamelCase : Optional[int] = 250_004
__lowerCamelCase : Dict = tokenizer.eos_token_id
__lowerCamelCase : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase )
hf_wavavec.save_pretrained(UpperCAmelCase )
feature_extractor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
__UpperCamelCase : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 458
| 1
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel with the given sigma."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    """Convolve a grayscale image with a Gaussian kernel using an im2col layout."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
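# Illustrative only: a self-contained check of gaussian_filter above that does not need
# the lena.jpg sample image; the random array is a stand-in grayscale picture.
import numpy as np

fake_gray = (np.random.rand(32, 32) * 255).astype(np.uint8)
smoothed = gaussian_filter(fake_gray, 3, sigma=1)
# a 3x3 kernel trims one border pixel on each side, so the output is 30x30
print(smoothed.shape)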
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCamelCase =None
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowerCamelCase ={
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowerCamelCase ={
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowerCamelCase =["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Any = vocab_file
UpperCamelCase__ : int = False if not self.vocab_file else True
UpperCamelCase__ : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCamelCase__ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase__ : List[Any] = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase__ : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase__ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = [self.sep_token_id]
UpperCamelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase__ : Optional[Any] = src_lang
UpperCamelCase__ : Optional[Any] = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
"""simple docstring"""
UpperCamelCase__ : int = src_lang
UpperCamelCase__ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
UpperCamelCase__ : Dict = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase__ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase__ : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = []
UpperCamelCase__ : List[str] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase__ : str = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase__ : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase__ : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
UpperCamelCase__ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
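# Illustrative only: a sketch of the released MBartTokenizerFast (which this file defines)
# preparing an en_XX -> ro_RO pair; the sentence pair is made up, and the `text_target`
# argument assumes a recent transformers release.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tok(
    "UN Chief says there is no military solution in Syria",
    text_target="Seful ONU declara ca nu exista o solutie militara in Siria",
    return_tensors="pt",
)
print(batch.input_ids[0][-1].item())  # last source token is the en_XX language code id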
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline using any model with a video classification head."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into one processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # Inside the deprecated `as_target_processor` context, forward everything to the
        # current processor (which is then the tokenizer).
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
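# Illustrative only: pairing the processor above with a public Speech2Text checkpoint.
# The random waveform is a stand-in for real 16 kHz audio.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
speech = np.random.randn(16_000).astype(np.float32)
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="hello world", return_tensors="pt")
print(inputs.input_features.shape, labels.input_ids.shape)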
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def a_ ( _A ) -> Any:
"""simple docstring"""
snake_case__ = VideoMAEConfig()
set_architecture_configs(_A , _A )
if "finetuned" not in model_name:
snake_case__ = False
if "finetuned" in model_name:
snake_case__ = 'huggingface/label-files'
if "kinetics" in model_name:
snake_case__ = 400
snake_case__ = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
snake_case__ = 174
snake_case__ = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
snake_case__ = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
snake_case__ = {int(_A ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
return config
def a_ ( _A , _A ) -> str:
"""simple docstring"""
if "small" in model_name:
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 16
snake_case__ = 12
snake_case__ = 3
snake_case__ = 192
snake_case__ = 768
elif "large" in model_name:
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
snake_case__ = 12
snake_case__ = 8
snake_case__ = 512
snake_case__ = 2048
elif "huge" in model_name:
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
snake_case__ = 12
snake_case__ = 8
snake_case__ = 640
snake_case__ = 2560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def a_ ( _A ) -> Dict:
"""simple docstring"""
if "encoder." in name:
snake_case__ = name.replace('encoder.' , '' )
if "cls_token" in name:
snake_case__ = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
snake_case__ = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
snake_case__ = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
snake_case__ = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case__ = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
snake_case__ = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
snake_case__ = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
snake_case__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
snake_case__ = name.replace('attn' , 'attention.self' )
if "attn" in name:
snake_case__ = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
snake_case__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case__ = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
snake_case__ = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
snake_case__ = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
snake_case__ = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case__ = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case__ = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
snake_case__ = name.replace('head' , 'classifier' )
return name
def a_ ( _A , _A ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case__ = orig_state_dict.pop(_A )
if key.startswith('encoder.' ):
snake_case__ = key.replace('encoder.' , '' )
if "qkv" in key:
snake_case__ = key.split('.' )
if key.startswith('decoder.blocks' ):
snake_case__ = config.decoder_hidden_size
snake_case__ = int(key_split[2] )
snake_case__ = 'decoder.decoder_layers.'
if "weight" in key:
snake_case__ = val[:dim, :]
snake_case__ = val[dim : dim * 2, :]
snake_case__ = val[-dim:, :]
else:
snake_case__ = config.hidden_size
snake_case__ = int(key_split[1] )
snake_case__ = 'videomae.encoder.layer.'
if "weight" in key:
snake_case__ = val[:dim, :]
snake_case__ = val[dim : dim * 2, :]
snake_case__ = val[-dim:, :]
else:
snake_case__ = val
return orig_state_dict
def a_ ( ) -> Tuple:
"""simple docstring"""
snake_case__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
snake_case__ = np.load(_A )
return list(_A )
def a_ ( _A , _A , _A , _A ) -> Dict:
"""simple docstring"""
snake_case__ = get_videomae_config(_A )
if "finetuned" in model_name:
snake_case__ = VideoMAEForVideoClassification(_A )
else:
snake_case__ = VideoMAEForPreTraining(_A )
# download original checkpoint, hosted on Google Drive
snake_case__ = 'pytorch_model.bin'
gdown.cached_download(_A , _A , quiet=_A )
snake_case__ = torch.load(_A , map_location='cpu' )
if "model" in files:
snake_case__ = files['model']
else:
snake_case__ = files['module']
snake_case__ = convert_state_dict(_A , _A )
model.load_state_dict(_A )
model.eval()
# verify model on basic input
snake_case__ = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case__ = prepare_video()
snake_case__ = image_processor(_A , return_tensors='pt' )
if "finetuned" not in model_name:
snake_case__ = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
snake_case__ = torch.load(_A )
snake_case__ = model(**_A )
snake_case__ = outputs.logits
snake_case__ = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case__ = torch.Size([1, 174] )
snake_case__ = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case__ = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case__ = torch.Size([1, 174] )
snake_case__ = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case__ = torch.Size([1, 174] )
snake_case__ = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , _A , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , _A , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case__ = outputs.loss
assert torch.allclose(_A , _A , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
model.save_pretrained(_A )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(_A , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
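# Illustrative only: loading an already-converted VideoMAE checkpoint from the hub and
# classifying a random 16-frame clip; the random frames are a stand-in for a real video.
import numpy as np
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
clip = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(16)]
inputs = processor(clip, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])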
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
A__ = logging.getLogger(__name__)
A__ = {'''facebook/bart-base''': BartForConditionalGeneration}
A__ = {'''facebook/bart-base''': BartTokenizer}
def _lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : int = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=lowerCAmelCase__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=lowerCAmelCase__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase__ , )
parser.add_argument(
'''--config_name''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=lowerCAmelCase__ , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Where to store the final ONNX file.''' )
snake_case__ : Any = parser.parse_args()
return args
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase="cpu" ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[int] = model_dict[model_name].from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case__ : str = tokenizer_dict[model_name].from_pretrained(lowerCAmelCase__ )
if model_name in ["facebook/bart-base"]:
snake_case__ : Tuple = 0
snake_case__ : Tuple = None
snake_case__ : List[Any] = 0
return huggingface_model, tokenizer
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
model.eval()
snake_case__ : List[str] = None
snake_case__ : Dict = torch.jit.script(BARTBeamSearchGenerator(lowerCAmelCase__ ) )
with torch.no_grad():
snake_case__ : Any = '''My friends are cool but they eat too many carbs.'''
snake_case__ : str = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device )
snake_case__ : Tuple = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=lowerCAmelCase__ , max_length=lowerCAmelCase__ , early_stopping=lowerCAmelCase__ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowerCAmelCase__ , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowerCAmelCase__ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=lowerCAmelCase__ , )
logger.info('''Model exported to {}'''.format(lowerCAmelCase__ ) )
snake_case__ : str = remove_dup_initializers(os.path.abspath(lowerCAmelCase__ ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(lowerCAmelCase__ ) )
snake_case__ : Dict = onnxruntime.InferenceSession(lowerCAmelCase__ )
snake_case__ : Any = ort_sess.run(
lowerCAmelCase__ , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(lowerCAmelCase__ ),
'''max_length''': np.array(lowerCAmelCase__ ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def _lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Any = parse_args()
snake_case__ : int = 5
snake_case__ : int = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
snake_case__ : List[str] = torch.device(args.device )
snake_case__ , snake_case__ : str = load_model_tokenizer(args.model_name_or_path , lowerCAmelCase__ )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(lowerCAmelCase__ )
if args.max_length:
snake_case__ : Union[str, Any] = args.max_length
if args.num_beams:
snake_case__ : Dict = args.num_beams
if args.output_file_path:
snake_case__ : Optional[Any] = args.output_file_path
else:
snake_case__ : Union[str, Any] = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
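# Illustrative only: running the exported beam-search graph with onnxruntime, mirroring
# the validation step inside export_and_validate_model above. "BART.onnx" is the
# script's default output file name.
import numpy as np
import onnxruntime
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="np")
ort_sess = onnxruntime.InferenceSession("BART.onnx")
summary_ids = ort_sess.run(
    None,
    {
        "input_ids": inputs["input_ids"],
        "attention_mask": inputs["attention_mask"],
        "num_beams": np.array(4),
        "max_length": np.array(5),
        "decoder_start_token_id": np.array(2),  # decoder_start_token_id of facebook/bart-base
    },
)[0]
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))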
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
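# Illustrative only: instantiating a small configuration and a bare MarkupLM encoder;
# the reduced sizes are arbitrary and only meant to keep the example light.
from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=512)
model = MarkupLMModel(config)
print(sum(p.numel() for p in model.parameters()))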
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _snake_case ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=None , _snake_case : Dict=None ) -> int:
'''simple docstring'''
if attention_mask is None:
_A = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : int = OPTConfig
UpperCAmelCase : Any = {}
UpperCAmelCase : Any = '''gelu'''
def __init__( self : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : List[Any]=7 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=4 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Dict=20 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Any=16 , ):
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = eos_token_id
_A = pad_token_id
_A = bos_token_id
_A = embed_dim
_A = word_embed_proj_dim
_A = False
def lowerCAmelCase_ ( self : Any ):
_A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A = tf.concat([input_ids, eos_tensor] , axis=1 )
_A = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **self.config_updates , )
_A = prepare_opt_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
_A = TFOPTModel(config=SCREAMING_SNAKE_CASE__ )
_A = inputs_dict['''input_ids''']
_A = input_ids[:1, :]
_A = inputs_dict['''attention_mask'''][:1, :]
_A = 1
# first forward pass
_A = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
_A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_A = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_A = tf.concat([input_ids, next_tokens] , axis=-1 )
_A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_A = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
_A = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_A = output_from_no_past[:, -3:, random_slice_idx]
_A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-3 )
@require_tf
class lowercase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCAmelCase : Union[str, Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCAmelCase : Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : Any = False
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Optional[Any] = 10
def lowerCAmelCase_ ( self : int ):
_A = TFOPTModelTester(self )
_A = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ):
if hasattr(SCREAMING_SNAKE_CASE__ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(SCREAMING_SNAKE_CASE__ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_A = model_class(config=SCREAMING_SNAKE_CASE__ )
_A = _get_word_embedding_weight(SCREAMING_SNAKE_CASE__ , model.get_input_embeddings() )
_A = _get_word_embedding_weight(SCREAMING_SNAKE_CASE__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(SCREAMING_SNAKE_CASE__ )
_A = _get_word_embedding_weight(SCREAMING_SNAKE_CASE__ , model.get_input_embeddings() )
_A = _get_word_embedding_weight(SCREAMING_SNAKE_CASE__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_A = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , SCREAMING_SNAKE_CASE__ )
# check that weights remain the same after resizing
_A = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_A = False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , SCREAMING_SNAKE_CASE__ )
_A = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_A = False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
def _snake_case ( _snake_case : int ) -> Tuple:
'''simple docstring'''
return tf.constant(snake_case__ , dtype=tf.intaa )
@require_tf
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = 99
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_A = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_A = input_ids.shape[0]
_A = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : str ):
_A = TFOPTModel.from_pretrained('facebook/opt-350m' )
_A = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_A = tf.not_equal(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id )
with tf.GradientTape():
_A = model(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).last_hidden_state
_A = (1, 11, 512)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
_A = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=4E-3 ) )
_A = tf.function(SCREAMING_SNAKE_CASE__ , jit_compile=SCREAMING_SNAKE_CASE__ )
_A = xla_generate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=4E-2 ) )
@require_tf
@slow
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[Any] ):
super().setUp()
_A = '''facebook/opt-350m'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = TFOPTForCausalLM.from_pretrained(self.path_model )
_A = GPTaTokenizer.from_pretrained(self.path_model )
_A = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_A = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
_A = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_A = tf.constant(
[
[1.3851, -13.8_923, -10.5_229, -10.7_533, -0.2309, -10.2_384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6_276, -3.9415, -21.5_242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1_650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7_926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
_A = tf.function(SCREAMING_SNAKE_CASE__ , jit_compile=SCREAMING_SNAKE_CASE__ )
_A = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@require_tf
@slow
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : int ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCAmelCase_ ( self : List[Any] ):
_A = '''facebook/opt-125m'''
_A = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_A = []
_A = GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
_A = TFOPTForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
for prompt in self.prompts:
_A = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='tf' ).input_ids
_A = model.generate(SCREAMING_SNAKE_CASE__ , max_length=10 )
_A = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
predicted_outputs += generated_string
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( self : List[str] ):
_A = '''facebook/opt-350m'''
_A = GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
_A = TFOPTForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
_A = '''left'''
# use different length sentences to test batching
_A = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_A = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE__ )
_A = inputs['''input_ids''']
_A = model.generate(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=inputs['attention_mask'] )
_A = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_A = model.generate(input_ids=SCREAMING_SNAKE_CASE__ )
_A = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_A = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_A = model.generate(input_ids=SCREAMING_SNAKE_CASE__ , max_length=model.config.max_length - num_paddings )
_A = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
_A = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
_A = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
_A = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [non_padded_sentence, padded_sentence] )
def lowerCAmelCase_ ( self : Dict ):
_A = '''facebook/opt-350m'''
_A = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_A = []
_A = GPTaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
_A = TFOPTForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
for prompt in self.prompts:
_A = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='tf' ).input_ids
_A = model.generate(SCREAMING_SNAKE_CASE__ , max_length=10 )
_A = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
predicted_outputs += generated_string
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
UpperCamelCase__ :List[Any] = logging.get_logger(__name__)
UpperCamelCase__ :Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
UpperCamelCase__ :List[str] = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
UpperCamelCase__ :List[Any] = {
"""RUCAIBox/mvp""": 1_024,
}
class A( lowerCamelCase__ ):
"""simple docstring"""
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
A = MvpTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> List[str]:
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
_UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_UpperCamelCase :int = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
_UpperCamelCase :Optional[int] = add_prefix_space
_UpperCamelCase :List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCamelCase :Any = '''post_processor'''
_UpperCamelCase :Optional[Any] = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
_UpperCamelCase :Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCamelCase :Optional[Any] = tuple(state['''sep'''] )
if "cls" in state:
_UpperCamelCase :Optional[Any] = tuple(state['''cls'''] )
_UpperCamelCase :str = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_UpperCamelCase :Optional[int] = add_prefix_space
_UpperCamelCase :Union[str, Any] = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
_UpperCamelCase :Union[str, Any] = trim_offsets
_UpperCamelCase :Optional[Any] = True
if changes_to_apply:
_UpperCamelCase :Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
_UpperCamelCase :Optional[Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
_UpperCamelCase :List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
_UpperCamelCase :int = value
def _UpperCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> BatchEncoding:
"""simple docstring"""
_UpperCamelCase :int = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> BatchEncoding:
"""simple docstring"""
_UpperCamelCase :Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
_UpperCamelCase :Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> str:
"""simple docstring"""
_UpperCamelCase :Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
_UpperCamelCase :Any = [self.sep_token_id]
_UpperCamelCase :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
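# Illustrative only: a quick round trip with the released MvpTokenizerFast that this
# file defines, using the public RUCAIBox/mvp checkpoint.
from transformers import MvpTokenizerFast

tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
ids = tok("Summarize: the cat sat on the mat.", return_tensors="pt").input_ids
print(tok.batch_decode(ids, skip_special_tokens=True))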
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : List[str] = '''T5Config'''
class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
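# Illustrative only: loading the public mT5-small checkpoint with the released TensorFlow
# class these wrappers correspond to; mT5 is pre-trained without supervised tasks, so this
# is only a shape check, not a meaningful generation example.
from transformers import AutoTokenizer, TFMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
inputs = tokenizer("UN peacekeepers were deployed in the region.", return_tensors="tf")
outputs = model(inputs.input_ids, decoder_input_ids=inputs.input_ids)
print(outputs.logits.shape)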
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
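    # Sanity check of the two diagonal formulas above (a known result, not computed here):
    # for n = 4 the search finds exactly two boards, [1, 3, 0, 2] and [2, 0, 3, 1], i.e.
    #
    #   . Q . .      . . Q .
    #   . . . Q      Q . . .
    #   Q . . .      . . . Q
    #   . . Q .      . Q . .
    #
    # so the script prints both boards followed by "2 solutions were found."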
| 345
| 0
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location='''cpu''' )
    state_dict = chkpt["""model"""]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["""transformer.""" + k] = v
    config = chkpt["""params"""]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["""dico_word2id"""]
    vocab = {s + """</w>""" if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(config , indent=2 ) + '''\n''' )
    print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '''\n''' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
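    # Illustrative invocation of this conversion script (script name and paths are placeholders):
    #   python <this_script>.py \
    #       --xlm_checkpoint_path /path/to/original/xlm/checkpoint.pth \
    #       --pytorch_dump_folder_path /path/to/output_dir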
| 420
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path ,tgt_path ,save_path=None ,**kwargs ):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns ,tgt_lns ,**kwargs )
    if save_path is not None:
        save_json(metrics ,save_path ,indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
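    # Illustrative CLI usage via python-fire (script and file names are placeholders, not shipped here):
    #   python <this_script>.py predictions.txt references.txt --save_path rouge.json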
| 17
| 0
|
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCamelCase__ ( a ):
'''simple docstring'''
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
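# Minimal usage sketch of the feature-extraction task this class implements
# (the checkpoint name is only an example, nothing in this file pins it):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test", return_tensors=False)
#   # -> nested Python list with shape (1, num_tokens, hidden_size)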
| 123
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
A_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
A_ = typing.Union[np.floataa, int, float] # noqa: UP007
def euclidean_distance( vector_a : Vector , vector_b : Vector ) -> VectorOut:
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def euclidean_distance_no_np( vector_a : Vector , vector_b : Vector ) -> VectorOut:
    '''simple docstring'''
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark( ) -> None:
'''simple docstring'''
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' ,number=1_0_0_0_0 ,globals=globals() ,) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' ,number=1_0_0_0_0 ,globals=globals() ,) )
benchmark()
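    # Worked example: both implementations agree on
    #   euclidean_distance([1, 2, 3], [4, 5, 6]) == sqrt(3**2 + 3**2 + 3**2) == sqrt(27) ≈ 5.196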
| 123
| 1
|
def binary_exponentiation( a , n , mod ) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod
# a prime number
SCREAMING_SNAKE_CASE : str = 701
SCREAMING_SNAKE_CASE : Optional[int] = 1000000000
SCREAMING_SNAKE_CASE : Optional[int] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
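# Why the two prints hold: for prime p, Fermat's little theorem gives b**(p - 1) ≡ 1 (mod p),
# so b**(p - 2) is the modular inverse of b. Small check with the constants above:
#   binary_exponentiation(10, 699, 701) == 631 and (10 * 631) % 701 == 1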
| 89
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = """▁"""
UpperCAmelCase_ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase_ : Tuple = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
UpperCAmelCase_ : Optional[int] = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : int="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : List[Any]="</s>" , lowercase_ : List[str]="<s>" , lowercase_ : Dict="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple="<mask>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token
SCREAMING_SNAKE_CASE_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowercase_))
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE_ : str = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(self.sp_model) + self.fairseq_offset
SCREAMING_SNAKE_CASE_ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
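        # Example of the resulting alignment (consistent with the table above): the SP piece ","
        # has spm id 3, so its vocab id becomes 3 + self.fairseq_offset == 4, while ids 0-3 stay
        # reserved for "<s>", "<pad>", "</s>" and "<unk>".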
def __getstate__( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
if token_ids_a is None:
return [1] + ([0] * len(lowercase_)) + [1]
return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) + [1]
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : str):
'''simple docstring'''
return self.sp_model.encode(lowercase_ , out_type=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : int):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ : int = self.sp_model.PieceToId(lowercase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : List[str]):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(lowercase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , '''wb''') as fi:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 704
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        return {}, {}, {}
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
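# Minimal usage sketch for this pipeline (the checkpoint name is illustrative only):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("example.jpg")
#   result["depth"]            # PIL image built in postprocess() above
#   result["predicted_depth"]  # raw depth tensor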
| 176
| 0
|
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _snake_case (unittest.TestCase):
__A : List[str] =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Optional[int] = hf_hub_download(
repo_id="nateraw/video-demo" ,filename="archery.mp4" ,repo_type="dataset" )
UpperCAmelCase_ : Union[str, Any] = VideoClassificationPipeline(model=_snake_case ,image_processor=_snake_case ,top_k=2 )
UpperCAmelCase_ : int = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
for example in examples:
UpperCAmelCase_ : Any = video_classifier(_snake_case )
self.assertEqual(
_snake_case ,[
{"score": ANY(_snake_case ), "label": ANY(_snake_case )},
{"score": ANY(_snake_case ), "label": ANY(_snake_case )},
] ,)
@require_torch
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
UpperCAmelCase_ : Tuple = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} ,crop_size={"height": 10, "width": 10} )
UpperCAmelCase_ : str = pipeline(
"video-classification" ,model=_snake_case ,feature_extractor=_snake_case ,frame_sampling_rate=4 )
UpperCAmelCase_ : int = hf_hub_download(repo_id="nateraw/video-demo" ,filename="archery.mp4" ,repo_type="dataset" )
UpperCAmelCase_ : Any = video_classifier(_snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] ,)
UpperCAmelCase_ : Optional[Any] = video_classifier(
[
video_file_path,
video_file_path,
] ,top_k=2 ,)
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] ,)
@require_tf
def UpperCamelCase__ ( self ):
pass
| 71
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def UpperCamelCase__ ( table ):
    """simple docstring"""
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
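    # Worked example of the Doolittle factorisation produced above (lower has a unit diagonal):
    #   table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    #   lower, upper = UpperCamelCase__(table)
    #   np.allclose(lower @ upper, table)  # -> True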
| 207
| 0
|
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_SCREAMING_SNAKE_CASE = get_logger()
_SCREAMING_SNAKE_CASE = None
class _lowerCAmelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Tuple , __snake_case : Optional[Any]=None , __snake_case : Optional[Any]=None , **__snake_case : Optional[int] )-> Dict:
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
snake_case = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case = str(jax.devices()[0] )
snake_case = jnp_array_kwargs
@staticmethod
def lowerCAmelCase ( )-> str:
import jax
return {str(__A ): device for device in jax.devices()}
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str )-> int:
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def lowerCAmelCase ( self : Dict , __snake_case : Dict )-> Optional[int]:
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
snake_case = {"dtype": jnp.intaa}
else:
snake_case = {"dtype": jnp.intaa}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCAmelCase ( self : Tuple , __snake_case : Optional[int] )-> Dict:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , """__array__""" ) and not isinstance(__A , jax.Array ):
snake_case = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def lowerCAmelCase ( self : Tuple , __snake_case : dict )-> Tuple:
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : pa.Table )-> str:
snake_case = self.numpy_arrow_extractor().extract_row(__A )
snake_case = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def lowerCAmelCase ( self : Any , __snake_case : pa.Table )-> int:
snake_case = self.numpy_arrow_extractor().extract_column(__A )
snake_case = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case = self.recursive_tensorize(__A )
snake_case = self._consolidate(__A )
return column
def lowerCAmelCase ( self : Optional[int] , __snake_case : pa.Table )-> Any:
snake_case = self.numpy_arrow_extractor().extract_batch(__A )
snake_case = self.python_features_decoder.decode_batch(__A )
snake_case = self.recursive_tensorize(__A )
for column_name in batch:
snake_case = self._consolidate(batch[column_name] )
return batch
| 711
|
'''simple docstring'''
from __future__ import annotations
def peak( lst : list[int] ) -> int:
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
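    # Worked example: peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) returns 5. The O(log n) bound assumes the
    # input first increases and then decreases (a single "mountain"), as the recursion relies on it.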
| 517
| 0
|
def _SCREAMING_SNAKE_CASE ( a , b ):
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1''' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
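    # Worked example: with a = 25 (0b11001) and b = 32 (0b100000) no aligned pair of bits is
    # '1' in both strings, so the function returns "0b000000".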
| 590
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowerCamelCase ( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , embedding_dim = 768 , ):
        '''simple docstring'''
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        '''simple docstring'''
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        '''simple docstring'''
        embeds = (embeds * self.std) + self.mean
        return embeds
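# Minimal sketch of the intended round trip (values are illustrative):
#   normalizer = _lowerCamelCase(embedding_dim=768)
#   embeds = torch.randn(2, 768)
#   normalizer.unscale(normalizer.scale(embeds))  # ≈ embeds (mean is zeros, std is ones at init)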
| 590
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
a_ =None
a_ =BloomTokenizerFast
a_ =BloomTokenizerFast
a_ =True
a_ =False
a_ ="""tokenizer_file"""
a_ ={"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowerCAmelCase__ = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowerCAmelCase__ = tokenizer.batch_encode_plus(__UpperCAmelCase )["input_ids"]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase=6 )-> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCAmelCase__ = "This is a simple input"
lowerCAmelCase__ = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase__ = ("This is a simple input", "This is a pair")
lowerCAmelCase__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(__UpperCAmelCase , max_length=__UpperCAmelCase )
tokenizer_r.encode_plus(__UpperCAmelCase , max_length=__UpperCAmelCase )
tokenizer_r.batch_encode_plus(__UpperCAmelCase , max_length=__UpperCAmelCase )
tokenizer_r.encode(__UpperCAmelCase , max_length=__UpperCAmelCase )
tokenizer_r.batch_encode_plus(__UpperCAmelCase , max_length=__UpperCAmelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowerCAmelCase__ = None # Hotfixing padding = None
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = load_dataset("xnli" , "all_languages" , split="test" , streaming=__UpperCAmelCase )
lowerCAmelCase__ = next(iter(__UpperCAmelCase ) )["premise"] # pick up one data
lowerCAmelCase__ = list(sample_data.values() )
lowerCAmelCase__ = list(map(tokenizer.encode , __UpperCAmelCase ) )
lowerCAmelCase__ = [tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 115
| 0
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
"""split_info""" , [SplitInfo(), SplitInfo(dataset_name=__snake_case ), SplitInfo(dataset_name="""my_dataset""" )] )
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : Union[str, Any] = asdict(SplitDict({"""train""": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 182
|
from __future__ import annotations
def lowercase__ ( nums : list[int] ):
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
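    # Worked example: for [1, 2, 3, 4, 5] the best non-adjacent selection is 1 + 3 + 5,
    # so the function above returns 9.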
| 406
| 0
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (EulerDiscreteScheduler,)
lowercase = 10
def snake_case_ ( self , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self ) -> str:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="""v_prediction""" )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ , use_karras_sigmas=UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 667
|
'''simple docstring'''
def UpperCAmelCase__ ( num ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
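    # Worked examples: 121 reverses to 121 -> True; 123 reverses to 321 -> False;
    # negative inputs are rejected immediately.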
| 667
| 1
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "new-model"
if is_tf_available():
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = NewModelConfig
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = "bert-base-cased"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = "bert-base-cased"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : int ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : List[str] ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
@require_tensorflow_probability
def _snake_case ( self : Union[str, Any] ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 )
def _snake_case ( self : List[Any] ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = copy.deepcopy(model.config )
SCREAMING_SNAKE_CASE = ["FunnelBaseModel"]
SCREAMING_SNAKE_CASE = TFAutoModel.from_config(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
try:
AutoConfig.register("new-model" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__lowerCamelCase ):
auto_class.register(__lowerCamelCase , __lowerCamelCase )
auto_class.register(__lowerCamelCase , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
auto_class.register(__lowerCamelCase , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config()
SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() )
SCREAMING_SNAKE_CASE = auto_class.from_config(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = auto_class.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _snake_case ( self : Optional[int] ):
with self.assertRaisesRegex(
__lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" )
def _snake_case ( self : Any ):
with self.assertRaisesRegex(
__lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase , revision="aaaaaa" )
def _snake_case ( self : str ):
with self.assertRaisesRegex(
__lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _snake_case ( self : Tuple ):
with self.assertRaisesRegex(__lowerCamelCase , "Use `from_pt=True` to load this model" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def _snake_case ( self : int ):
# Make sure we have cached the model.
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 16
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case : Optional[Any] = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
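# A minimal sketch of the lazy-import idea behind `_LazyModule`, using the
# module-level `__getattr__` hook from PEP 562. The attribute map below is
# illustrative only (standard-library modules), not the transformers machinery.
import importlib
_LAZY_ATTRS = {
    "pi": "math",                 # `pi` lives in the `math` module
    "ascii_lowercase": "string",  # `ascii_lowercase` lives in `string`
}
def __getattr__(name):
    # The backing module is imported only when the attribute is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")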
| 335
| 0
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
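    # A NumPy-only cross-check of the same pointwise definitions, handy when
    # scikit-fuzzy is unavailable. `trimf` below is a hand-rolled stand-in for
    # fuzz.membership.trimf; everything else is elementwise max/min/arithmetic.
    def trimf(x, abc):
        a, b, c = abc
        left = (x - a) / (b - a)
        right = (c - x) / (c - b)
        return np.clip(np.minimum(left, right), 0.0, 1.0)
    young_np = trimf(X, abc1)
    middle_np = trimf(X, abc2)
    union_np = np.maximum(young_np, middle_np)         # max(µA, µB)
    intersection_np = np.minimum(young_np, middle_np)  # min(µA, µB)
    complement_np = 1.0 - young_np                     # 1 - µA
    difference_np = np.minimum(young_np, 1.0 - middle_np)
    alg_sum_np = young_np + middle_np - young_np * middle_np
    bdd_sum_np = np.minimum(1.0, young_np + middle_np)
    bdd_difference_np = np.maximum(0.0, young_np - middle_np)
    assert np.all(union_np >= intersection_np)         # union dominates intersection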
| 720
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "roc_bert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24858 , concat_input=True , **kwargs ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs)
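# A short usage sketch (assumes an installed `transformers` release that ships
# RoCBert). Overridden values land on the config instance; everything else keeps
# the defaults declared above.
if __name__ == "__main__":
    from transformers import RoCBertConfig
    config = RoCBertConfig(hidden_size=512, num_hidden_layers=6)
    print(config.hidden_size)               # 512
    print(config.pronunciation_vocab_size)  # 910 (default)
    print(config.model_type)                # "roc_bert"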
| 170
| 0
|
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """simple docstring"""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
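# A usage sketch for the adapter above (assumes `accelerate` is installed and
# that an Accelerator/PartialState has been created first, as the RuntimeError
# above requires). `main_process_only` and `in_order` are the kwargs popped in
# `log`.
if __name__ == "__main__":
    from accelerate import Accelerator
    accelerator = Accelerator()  # initializes the shared state
    logger = get_logger(__name__, log_level="INFO")
    logger.info("logged once, from the main process only")
    logger.info("logged by every process, in rank order", main_process_only=False, in_order=True)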
| 555
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        '''simple docstring'''
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1
    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        '''simple docstring'''
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
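# For comparison, the standard competitive-learning rule picks the unit whose
# weight vector is *closest* to the sample (argmin of squared distance). A
# vectorized NumPy sketch, independent of the loop-based get_winner above:
import numpy as np
def nearest_unit(weights, sample):
    w = np.asarray(weights, dtype=float)
    s = np.asarray(sample, dtype=float)
    distances = ((w - s) ** 2).sum(axis=1)  # one squared distance per unit
    return int(np.argmin(distances))
if __name__ == "__main__":
    print(nearest_unit([[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]], [0, 0, 0, 1]))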
| 555
| 1
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """simple docstring"""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f'{solution() = }')
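# The same quantity (sum of the multiples of 3 or 5 below n) has an O(1)
# closed form via inclusion-exclusion; a quick cross-check of the loop above.
def sum_of_multiples(k: int, n: int) -> int:
    m = (n - 1) // k             # number of multiples of k strictly below n
    return k * m * (m + 1) // 2  # k * (1 + 2 + ... + m)
def solution_closed_form(n: int = 1000) -> int:
    return sum_of_multiples(3, n) + sum_of_multiples(5, n) - sum_of_multiples(15, n)
if __name__ == "__main__":
    assert solution_closed_form(1000) == 233168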
| 708
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : List[str] = logging.getLogger(__name__)
def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : str ):
"""simple docstring"""
# save results
if os.path.exists(__lowerCamelCase ):
if os.path.exists(os.path.join(__lowerCamelCase , '''config.json''' ) ) and os.path.isfile(
os.path.join(__lowerCamelCase , '''config.json''' ) ):
os.remove(os.path.join(__lowerCamelCase , '''config.json''' ) )
if os.path.exists(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) )
else:
os.makedirs(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =2
if unlogit:
lowerCamelCase__ : Any =torch.pow(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : List[str] =p * torch.log(__lowerCamelCase )
lowerCamelCase__ : Tuple =0
return -plogp.sum(dim=-1 )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(__lowerCamelCase ) ) ) )
for row in range(len(__lowerCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=False ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads
lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
if head_mask is None:
lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCamelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowerCamelCase__ : Union[str, Any] =None
lowerCamelCase__ : List[str] =0.0
lowerCamelCase__ : Union[str, Any] =0.0
for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs )
((lowerCamelCase__) , ) : Any =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCamelCase ):
lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowerCamelCase__ : int =2
lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
if not args.dont_normalize_global_importance:
lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(__lowerCamelCase )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(__lowerCamelCase )
logger.info('''Head ranked by importance scores''' )
lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowerCamelCase__ : Dict =torch.arange(
head_importance.numel() , device=args.device )
lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase )
print_ad_tensor(__lowerCamelCase )
return attn_entropy, head_importance, total_loss
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase )
lowerCamelCase__ : int =1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold )
lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowerCamelCase__ : List[Any] =original_score
while current_score >= original_score * args.masking_threshold:
lowerCamelCase__ : List[Any] =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowerCamelCase__ : int =float('''Inf''' )
lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1]
if len(__lowerCamelCase ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 )
lowerCamelCase__ : Optional[Any] =0.0
lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase )
lowerCamelCase__ : Tuple =new_head_mask.clone().detach()
print_ad_tensor(__lowerCamelCase )
# Compute metric and head importance again
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase )
lowerCamelCase__ : Any =1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(__lowerCamelCase )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : str =datetime.now()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase )
lowerCamelCase__ : Tuple =1 / loss
lowerCamelCase__ : Optional[Any] =datetime.now() - before_time
lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() )
lowerCamelCase__ : Any ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : Optional[int] =[
v,
]
assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCamelCase )
lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() )
lowerCamelCase__ : Any =datetime.now()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , )
lowerCamelCase__ : str =1 / loss
lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(__lowerCamelCase , args.output_dir )
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
lowerCamelCase__ : List[Any] =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowerCamelCase__ : Dict =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank )
lowerCamelCase__ : Any =1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel(
__lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase )
elif args.n_gpu > 1:
lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Prepare dataset
lowerCamelCase__ : Union[str, Any] =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),)
lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase )
lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase )
lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCamelCase__ : Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
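# A self-contained illustration of the entropy score used above to rank
# attention heads: for an attention distribution p over the keys,
# H(p) = -sum_j p_j * log(p_j). Uniform attention maximizes it (log of the
# number of keys); a near one-hot pattern drives it towards 0.
import torch
def attention_entropy(p: torch.Tensor) -> torch.Tensor:
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) as 0
    return -plogp.sum(dim=-1)
if __name__ == "__main__":
    uniform = torch.full((4,), 0.25)
    peaked = torch.tensor([0.97, 0.01, 0.01, 0.01])
    print(attention_entropy(uniform))  # ~1.386 == log(4)
    print(attention_entropy(peaked))   # much closer to 0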
| 625
| 0
|
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24
|
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
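# Cross-check with NumPy (assumed to be available): the mean absolute deviation
# is simply the mean of |x - mean(x)|.
if __name__ == "__main__":
    import numpy as np
    data = [2, 4, 6, 8]
    expected = float(np.abs(np.array(data) - np.mean(data)).mean())
    assert abs(average_absolute_deviation(data) - expected) < 1e-12  # both give 2.0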
| 96
| 0
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = StableUnCLIPPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_UpperCAmelCase = False
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :List[str] = 32
lowerCamelCase :List[Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=__snake_case , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase :Optional[int] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__snake_case , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase :List[Any] = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=__snake_case , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase :Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
lowerCamelCase :Union[str, Any] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase :Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase :List[str] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
lowerCamelCase :List[str] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = AutoencoderKL()
lowerCamelCase :int = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def snake_case ( self : int , __snake_case : Tuple , __snake_case : str=0 ):
if str(__snake_case ).startswith('''mps''' ):
lowerCamelCase :int = torch.manual_seed(__snake_case )
else:
lowerCamelCase :str = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
lowerCamelCase :int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self : Optional[Any] ):
lowerCamelCase :List[Any] = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Dict = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Any ):
lowerCamelCase :List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
lowerCamelCase :List[str] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase :Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase :Optional[Any] = pipe('''anime turle''' , generator=__snake_case , output_type='''np''' )
lowerCamelCase :Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase :str = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
lowerCamelCase :Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase :Optional[int] = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
lowerCamelCase :str = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
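# A generic sketch of the local-binary-pattern code computed in the last test:
# compare the 8 neighbours of a pixel with the centre and pack the comparisons
# into a byte. This is a stand-alone illustration, not the repo's exact helper.
def lbp_code(window):
    """`window` is a 3x3 patch (list of lists or ndarray); returns the LBP code."""
    center = window[1][1]
    # clockwise neighbour order starting at the top-left corner
    neighbours = [window[0][0], window[0][1], window[0][2], window[1][2],
                  window[2][2], window[2][1], window[2][0], window[1][0]]
    return sum(1 << i for i, p in enumerate(neighbours) if p >= center)
if __name__ == "__main__":
    patch = [[10, 20, 30], [40, 50, 60], [70, 80, 90]]
    print(lbp_code(patch))  # 120: only the neighbours >= 50 set their bit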
| 49
| 1
|
from __future__ import annotations
def lowercase_ ( __snake_case : list[int] ) -> bool:
'''simple docstring'''
return len(set(__snake_case ) ) == len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241
|
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_50_00_00) -> int:
    '''simple docstring'''
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
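# Euclid's parametrization used above: for coprime m > n of opposite parity,
# (m^2 - n^2, 2mn, m^2 + n^2) is a primitive right triangle whose perimeter is
# 2m(m + n) -- exactly the `2 * euclid_m * (euclid_m + euclid_n)` step size.
def euclid_triple(m: int, n: int) -> tuple[int, int, int]:
    return (m * m - n * n, 2 * m * n, m * m + n * n)
if __name__ == "__main__":
    a, b, c = euclid_triple(2, 1)        # the classic (3, 4, 5) triangle
    assert (a, b, c) == (3, 4, 5)
    assert a * a + b * b == c * c
    assert a + b + c == 2 * 2 * (2 + 1)  # perimeter = 2m(m + n)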
| 241
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=14 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : str = seq_length
UpperCAmelCase__ : List[Any] = is_training
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : str = use_mc_token_ids
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Optional[int] = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = type_vocab_size
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Optional[int] = num_choices
UpperCAmelCase__ : List[str] = scope
UpperCAmelCase__ : Any = self.vocab_size - 1
def snake_case__ ( self):
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : List[str] = None
if self.use_mc_token_ids:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.num_choices] , self.seq_length)
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase__ : int = self.get_config()
UpperCAmelCase__ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case__ ( self):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
UpperCAmelCase__ : Dict = CTRLModel(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
model(_lowerCamelCase , token_type_ids=_lowerCamelCase , head_mask=_lowerCamelCase)
model(_lowerCamelCase , token_type_ids=_lowerCamelCase)
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values) , config.n_layer)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
UpperCAmelCase__ : Dict = CTRLLMHeadModel(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Any = config_and_inputs
UpperCAmelCase__ : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : int = CTRLForSequenceClassification(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : str = model(_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
@require_torch
class _snake_case ( a__ , a__ , a__ , unittest.TestCase ):
lowerCAmelCase :Tuple = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase :Any = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase :Union[str, Any] = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase :List[Any] = True
lowerCAmelCase :Optional[Any] = False
lowerCAmelCase :Tuple = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = CTRLModelTester(self)
UpperCAmelCase__ : int = ConfigTester(self , config_class=_lowerCamelCase , n_embd=37)
def snake_case__ ( self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCamelCase)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def snake_case__ ( self):
pass
@slow
def snake_case__ ( self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Any = CTRLModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
@unittest.skip("""The model doesn't support left padding""") # and it's not used enough to be worth fixing :)
def snake_case__ ( self):
pass
@require_torch
class _snake_case ( unittest.TestCase ):
def snake_case__ ( self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = CTRLLMHeadModel.from_pretrained("""ctrl""")
model.to(_lowerCamelCase)
UpperCAmelCase__ : Any = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=_lowerCamelCase) # Legal the president is
UpperCAmelCase__ : List[Any] = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase__ : List[str] = model.generate(_lowerCamelCase , do_sample=_lowerCamelCase)
self.assertListEqual(output_ids[0].tolist() , _lowerCamelCase)
| 113
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class _snake_case ( a__ ):
lowerCAmelCase :Any = '''xlm'''
lowerCAmelCase :Any = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , _lowerCamelCase=3_0145 , _lowerCamelCase=2048 , _lowerCamelCase=12 , _lowerCamelCase=16 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=True , _lowerCamelCase=512 , _lowerCamelCase=2048**-0.5 , _lowerCamelCase=1e-1_2 , _lowerCamelCase=0.02 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=5 , _lowerCamelCase=True , _lowerCamelCase="first" , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=5 , _lowerCamelCase=5 , _lowerCamelCase=0 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=0 , **_lowerCamelCase , ):
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Tuple = emb_dim
UpperCAmelCase__ : Optional[Any] = n_layers
UpperCAmelCase__ : List[str] = n_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Optional[int] = attention_dropout
UpperCAmelCase__ : Tuple = gelu_activation
UpperCAmelCase__ : Optional[Any] = sinusoidal_embeddings
UpperCAmelCase__ : int = causal
UpperCAmelCase__ : Union[str, Any] = asm
UpperCAmelCase__ : Optional[Any] = n_langs
UpperCAmelCase__ : List[Any] = use_lang_emb
UpperCAmelCase__ : Union[str, Any] = layer_norm_eps
UpperCAmelCase__ : List[str] = bos_index
UpperCAmelCase__ : List[Any] = eos_index
UpperCAmelCase__ : int = pad_index
UpperCAmelCase__ : str = unk_index
UpperCAmelCase__ : Dict = mask_index
UpperCAmelCase__ : str = is_encoder
UpperCAmelCase__ : Dict = max_position_embeddings
UpperCAmelCase__ : Any = embed_init_std
UpperCAmelCase__ : List[Any] = init_std
UpperCAmelCase__ : List[str] = summary_type
UpperCAmelCase__ : Union[str, Any] = summary_use_proj
UpperCAmelCase__ : Any = summary_activation
UpperCAmelCase__ : List[str] = summary_proj_to_labels
UpperCAmelCase__ : Union[str, Any] = summary_first_dropout
UpperCAmelCase__ : str = start_n_top
UpperCAmelCase__ : str = end_n_top
UpperCAmelCase__ : Tuple = mask_token_id
UpperCAmelCase__ : Union[str, Any] = lang_id
if "n_words" in kwargs:
UpperCAmelCase__ : List[str] = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , **_lowerCamelCase)
class _snake_case ( a__ ):
@property
def snake_case__ ( self):
if self.task == "multiple-choice":
UpperCAmelCase__ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase__ : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
| 113
| 1
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__A : int = logging.getLogger(__name__)
def lowerCAmelCase_ ( a : List[str] , a : int ):
# save results
if os.path.exists(lowerCAmelCase_ ):
if os.path.exists(os.path.join(lowerCAmelCase_ , 'config.json' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , 'config.json' ) ):
os.remove(os.path.join(lowerCAmelCase_ , 'config.json' ) )
if os.path.exists(os.path.join(lowerCAmelCase_ , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , 'pytorch_model.bin' ) ):
os.remove(os.path.join(lowerCAmelCase_ , 'pytorch_model.bin' ) )
else:
os.makedirs(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def lowerCAmelCase_ ( a : Any , a : Optional[int]=False ):
a__ = 2
if unlogit:
a__ = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ )
a__ = p * torch.log(lowerCAmelCase_ )
a__ = 0
return -plogp.sum(dim=-1 )
def lowerCAmelCase_ ( a : Union[str, Any] ):
logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) )
for row in range(len(lowerCAmelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def lowerCAmelCase_ ( a : Optional[int] , a : List[str] , a : Dict , a : str=True , a : Optional[Any]=True , a : List[str]=None , a : Dict=False ):
a__ = model.config.num_hidden_layers, model.config.num_attention_heads
a__ = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
a__ = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
if head_mask is None:
a__ = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCAmelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
a__ = None
a__ = 0.0
a__ = 0.0
for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
a__ = tuple(t.to(args.device ) for t in inputs )
(a__ ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
a__ = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
a__ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCAmelCase_ ):
a__ = entropy(attn.detach() , lowerCAmelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
a__ = 2
a__ = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
a__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(lowerCAmelCase_ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(lowerCAmelCase_ )
logger.info('Head ranked by importance scores' )
a__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
a__ = torch.arange(
head_importance.numel() , device=args.device )
a__ = head_ranks.view_as(lowerCAmelCase_ )
print_ad_tensor(lowerCAmelCase_ )
return attn_entropy, head_importance, total_loss
def lowerCAmelCase_ ( a : List[str] , a : Tuple , a : Union[str, Any] ):
a__ = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ )
a__ = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , lowerCAmelCase_ , original_score * args.masking_threshold )
a__ = torch.ones_like(lowerCAmelCase_ )
a__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
a__ = original_score
while current_score >= original_score * args.masking_threshold:
a__ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
a__ = float('Inf' )
a__ = head_importance.view(-1 ).sort()[1]
if len(lowerCAmelCase_ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
a__ = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
a__ = new_head_mask.view(-1 )
a__ = 0.0
a__ = new_head_mask.view_as(lowerCAmelCase_ )
a__ = new_head_mask.clone().detach()
print_ad_tensor(lowerCAmelCase_ )
# Compute metric and head importance again
a__ = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
a__ = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(lowerCAmelCase_ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCAmelCase_ ( a : Union[str, Any] , a : Dict , a : str , a : Optional[int] ):
a__ = datetime.now()
a__ = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
a__ = 1 / loss
a__ = datetime.now() - before_time
a__ = sum(p.numel() for p in model.parameters() )
a__ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
a__ = [
v,
]
assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCAmelCase_ )
a__ = sum(p.numel() for p in model.parameters() )
a__ = datetime.now()
a__ = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , )
a__ = 1 / loss
a__ = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
save_model(lowerCAmelCase_ , args.output_dir )
def lowerCAmelCase_ ( ):
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=lowerCAmelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=lowerCAmelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=lowerCAmelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=lowerCAmelCase_ , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=lowerCAmelCase_ , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=lowerCAmelCase_ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowerCAmelCase_ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=lowerCAmelCase_ , help='Batch size.' )
parser.add_argument('--seed' , type=lowerCAmelCase_ , default=42 )
parser.add_argument('--local_rank' , type=lowerCAmelCase_ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=lowerCAmelCase_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=lowerCAmelCase_ , default='' , help='Can be used for distant debugging.' )
a__ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
a__ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
a__ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
a__ = torch.device('cuda' , args.local_rank )
a__ = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
a__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
a__ = nn.parallel.DistributedDataParallel(
lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ )
elif args.n_gpu > 1:
a__ = nn.DataParallel(lowerCAmelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ )
torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , lowerCAmelCase_ )
# Prepare dataset
a__ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
a__ = (torch.from_numpy(lowerCAmelCase_ ),)
a__ = TensorDataset(*lowerCAmelCase_ )
a__ = RandomSampler(lowerCAmelCase_ )
a__ = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
a__ = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
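# The script above scores each attention head by the gradient of the LM loss with
# respect to a per-head mask, then iteratively masks and finally prunes the least
# important heads. Below is a minimal, self-contained sketch of that importance
# score for a single batch; the "gpt2" checkpoint and the example sentence are
# assumptions for illustration, not part of the original script.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer


def head_importance_one_batch(model, input_ids):
    n_layers, n_heads = model.config.n_layer, model.config.n_head
    head_mask = torch.ones(n_layers, n_heads, requires_grad=True)
    outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
    outputs.loss.backward()  # gradient w.r.t. the mask measures each head's influence
    return head_mask.grad.abs()  # (n_layers, n_heads) importance scores


# usage sketch (downloads the small "gpt2" checkpoint):
#   tok = GPT2Tokenizer.from_pretrained("gpt2")
#   lm = GPT2LMHeadModel.from_pretrained("gpt2")
#   ids = tok("The quick brown fox jumps over the lazy dog.", return_tensors="pt").input_ids
#   print(head_importance_one_batch(lm, ids))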
| 394
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[int] , __a : List[str]=14 , __a : Optional[Any]=7 , __a : List[Any]=True , __a : Tuple=True , __a : Union[str, Any]=True , __a : Any=True , __a : Any=True , __a : Dict=99 , __a : List[Any]=32 , __a : Union[str, Any]=5 , __a : List[Any]=4 , __a : Tuple=37 , __a : Dict="gelu" , __a : Tuple=0.1 , __a : str=0.1 , __a : Optional[int]=512 , __a : Union[str, Any]=16 , __a : Tuple=2 , __a : Tuple=0.02 , __a : List[str]=3 , __a : Tuple=4 , __a : int=None , ) -> int:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : Optional[int] = batch_size
__lowercase : int = seq_length
__lowercase : Any = is_training
__lowercase : str = use_token_type_ids
__lowercase : Dict = use_input_mask
__lowercase : Tuple = use_labels
__lowercase : Optional[Any] = use_mc_token_ids
__lowercase : int = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Any = hidden_act
__lowercase : Optional[Any] = hidden_dropout_prob
__lowercase : Dict = attention_probs_dropout_prob
__lowercase : str = max_position_embeddings
__lowercase : List[Any] = type_vocab_size
__lowercase : List[str] = type_sequence_label_size
__lowercase : Optional[Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : List[str] = scope
__lowercase : Optional[Any] = self.vocab_size - 1
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Tuple = None
if self.use_token_type_ids:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Dict = None
if self.use_mc_token_ids:
__lowercase : int = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowercase : Tuple = None
__lowercase : int = None
__lowercase : Any = None
if self.use_labels:
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : int = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Dict = self.get_config()
__lowercase : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase ( self : List[str] , __a : Tuple , __a : str , __a : Optional[int] , __a : Any , __a : Union[str, Any] , *__a : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : int = CTRLModel(config=__a )
model.to(__a )
model.eval()
model(__a , token_type_ids=__a , head_mask=__a )
model(__a , token_type_ids=__a )
__lowercase : int = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase ( self : Any , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : Union[str, Any] , __a : Optional[Any] , *__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : str = CTRLLMHeadModel(__a )
model.to(__a )
model.eval()
__lowercase : Any = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    input_mask,
    head_mask,
    token_type_ids,
    mc_token_ids,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
__lowercase : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , __a : int , __a : Dict , __a : str , __a : List[str] , *__a : str ) -> int:
"""simple docstring"""
__lowercase : List[str] = self.num_labels
__lowercase : Optional[Any] = CTRLForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[str] = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_A : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
_A : Dict = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : str = True
_A : List[Any] = False
_A : List[Any] = False
def lowerCAmelCase ( self : int , __a : Tuple , __a : int , __a : str , __a : int , __a : Dict ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = CTRLModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , n_embd=37 )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__a )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__a )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[Any] = CTRLModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : int = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(__a )
__lowercase : str = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=__a ) # Legal the president is
__lowercase : Union[str, Any] = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowercase : List[Any] = model.generate(__a , do_sample=__a )
self.assertListEqual(output_ids[0].tolist() , __a )
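# The tester above feeds random ids through CTRLModel / CTRLLMHeadModel and checks
# output shapes. A standalone sketch of the same smoke test with a deliberately
# tiny, made-up configuration (runs on CPU, no checkpoint download needed):
import torch
from transformers import CTRLConfig, CTRLLMHeadModel

tiny_config = CTRLConfig(vocab_size=99, n_positions=64, n_embd=32, dff=64, n_layer=2, n_head=4)
tiny_model = CTRLLMHeadModel(tiny_config).eval()
batch = torch.randint(0, tiny_config.vocab_size, (2, 7))
with torch.no_grad():
    out = tiny_model(batch, labels=batch)
print(out.loss.shape, out.logits.shape)  # torch.Size([]) and torch.Size([2, 7, 99])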
| 149
| 0
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(point_a, point_b) -> float:
    return np.linalg.norm(np.array(point_a) - np.array(point_b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    # Pair each training sample with its label
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
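# Follow-up sketch: train_test_split above already produced a held-out set, so the
# same classifier can be scored on it. This assumes the split is unpacked as
# X_train, X_test, y_train, y_test and that classifier/classes are defined as above.
def holdout_accuracy(k=5):
    correct = sum(
        classifier(X_train, y_train, classes, point, k) == classes[label]
        for point, label in zip(X_test, y_test)
    )
    return correct / len(y_test)


print(f"hold-out accuracy (k=5): {holdout_accuracy():.2f}")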
| 710
|
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return one shortest path from start to goal as a list of nodes (or [])."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from start to target, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
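# Note: queue.pop(0) on a Python list is O(n); collections.deque gives O(1) pops from
# the left. A sketch of the distance search rewritten with deque (same demo_graph):
from collections import deque


def bfs_shortest_path_distance_deque(graph, start, target):
    if start == target:
        return 0
    dist = {start: 0}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for adjacent in graph.get(node, []):
            if adjacent not in dist:
                dist[adjacent] = dist[node] + 1
                if adjacent == target:
                    return dist[adjacent]
                queue.append(adjacent)
    return -1


print(bfs_shortest_path_distance_deque(demo_graph, "G", "D"))  # 4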
| 561
| 0
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : str = tokenizer(example["""content"""] , truncation=__UpperCamelCase )["""input_ids"""]
UpperCAmelCase__ : str = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
__UpperCAmelCase = HfArgumentParser(PretokenizationArguments)
__UpperCAmelCase = parser.parse_args()
if args.num_workers is None:
__UpperCAmelCase = multiprocessing.cpu_count()
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__UpperCAmelCase = time.time()
__UpperCAmelCase = load_dataset(args.dataset_name, split='train')
print(F"Dataset loaded in {time.time()-t_start:.2f}s")
__UpperCAmelCase = time.time()
__UpperCAmelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")
__UpperCAmelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 65
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=__lowerCamelCase ):
snake_case_ = ["""onnx"""]
def __init__( self : int ,*A : List[str] ,**A : int ):
'''simple docstring'''
requires_backends(self ,["""onnx"""] )
@classmethod
def __lowercase ( cls : Optional[Any] ,*A : List[str] ,**A : Dict ):
'''simple docstring'''
requires_backends(cls ,["""onnx"""] )
@classmethod
def __lowercase ( cls : List[Any] ,*A : Optional[int] ,**A : int ):
'''simple docstring'''
requires_backends(cls ,["""onnx"""] )
| 65
| 1
|
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
SCREAMING_SNAKE_CASE_ = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
SCREAMING_SNAKE_CASE_ = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images) -> list:
    # map a torch tensor in [-1, 1] with shape (N, C, H, W) to a list of PIL images
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images) -> list:
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
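# Usage sketch for the helpers above (assuming they are named pt_to_pil and
# numpy_to_pil): a random tensor standing in for decoded latents in [-1, 1] with
# shape (batch, channels, height, width) becomes a list of PIL images.
import torch

fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # pretend diffusion output in [-1, 1]
pil_list = pt_to_pil(fake_batch)
print(len(pil_list), pil_list[0].size)  # 2 (64, 64)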
| 370
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( a_ ):
__lowerCAmelCase = ["image_processor", "tokenizer"]
__lowerCAmelCase = "ViltImageProcessor"
__lowerCAmelCase = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , a_=None , a_=None , **a_ ):
a_ : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
a_ : List[Any] = kwargs.pop("feature_extractor" )
a_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
a_ : Dict = self.image_processor
def __call__( self , a_ , a_ = None , a_ = True , a_ = False , a_ = None , a_ = None , a_ = 0 , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = None , **a_ , ):
a_ : Union[str, Any] = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
# add pixel_values + pixel_mask
a_ : List[Any] = self.image_processor(a_ , return_tensors=a_ )
encoding.update(a_ )
return encoding
def snake_case_ ( self , *a_ , **a_ ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def snake_case_ ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def snake_case_ ( self ):
a_ : Union[str, Any] = self.tokenizer.model_input_names
a_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def snake_case_ ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def snake_case_ ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
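# Usage sketch for the processor above: one call tokenizes the text and adds
# pixel_values / pixel_mask from the image processor. The checkpoint name and the
# blank test image are assumptions for illustration.
from PIL import Image
from transformers import ViltProcessor

vilt_processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
blank = Image.new("RGB", (384, 384), color="white")
enc = vilt_processor(images=blank, text="How many cats are there?", return_tensors="pt")
print(sorted(enc.keys()))  # attention_mask, input_ids, pixel_mask, pixel_values, token_type_ids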
| 370
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> List[Any]:
_A = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
_A = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
_A = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_A = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
_A = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_snake_case )-1}''' )
if "norm" in key:
_A = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_A = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
_A = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_snake_case )-1}''' )
if "layer_norm1" in key:
_A = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
_A = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
_A = key[key.find('''block''' ) + len('''block''' )]
_A = key.replace(F'''block{idx}''' , F'''block.{int(_snake_case )-1}''' )
if "attn.q" in key:
_A = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
_A = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
_A = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
_A = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
_A = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
_A = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
_A = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
_A = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_A = key[key.find('''linear_c''' ) + len('''linear_c''' )]
_A = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_snake_case )-1}''' )
if "bot_conv" in key:
_A = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
_A = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
_A = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
_A = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
_A = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
_A = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
_A = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
_A = key.replace('''module.last_layer_depth''' , '''head.head''' )
_A = value
return new_state_dict
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :Any ) -> str:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_A = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
_A = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
_A = kv_weight[
: config.hidden_sizes[i], :
]
_A = kv_bias[: config.hidden_sizes[i]]
_A = kv_weight[
config.hidden_sizes[i] :, :
]
_A = kv_bias[config.hidden_sizes[i] :]
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
_A = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :Any , _snake_case :str=False , _snake_case :Tuple=None ) -> Any:
_A = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_A = GLPNImageProcessor()
# prepare image
_A = prepare_img()
_A = image_processor(images=_snake_case , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
_A = torch.load(_snake_case , map_location=torch.device('''cpu''' ) )
# rename keys
_A = rename_keys(_snake_case )
# key and value matrices need special treatment
read_in_k_v(_snake_case , _snake_case )
# create HuggingFace model and load state dict
_A = GLPNForDepthEstimation(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
# forward pass
_A = model(_snake_case )
_A = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_A = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
_A = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_A = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _snake_case , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(_snake_case , _snake_case ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=_snake_case , )
image_processor.push_to_hub(
repo_path_or_name=Path(_snake_case , _snake_case ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=_snake_case , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
UpperCAmelCase_ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
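# The converter above is mostly systematic key renaming plus splitting the fused kv
# projection. A tiny standalone sketch of the renaming pattern on a made-up state
# dict (keys and rules here are for illustration, not the full GLPN mapping):
from collections import OrderedDict
import torch


def rename_state_dict(state_dict, replacements):
    renamed = OrderedDict()
    for key, value in state_dict.items():
        for old, new in replacements:
            key = key.replace(old, new)
        renamed[key] = value
    return renamed


fake_sd = OrderedDict({"module.encoder.block1.attn.q.weight": torch.zeros(2, 2)})
rules = [("module.encoder", "glpn.encoder"), ("attn.q", "attention.self.query")]
print(list(rename_state_dict(fake_sd, rules).keys()))
# ['glpn.encoder.block1.attention.self.query.weight']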
| 2
|
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, None))


if __name__ == "__main__":
    print(f'{solution() = }')
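# Quick sanity check for the helpers above (assuming they are named is_prime and
# prime_generator, as the __main__ call suggests): compare the 6k +/- 1 trial
# division against a naive divisor test and list the first ten primes.
import itertools


def naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))


assert all(is_prime(n) == naive_is_prime(n) for n in range(200))
print(list(itertools.islice(prime_generator(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]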
| 2
| 1
|
from __future__ import annotations
lowercase : Dict = """Muhammad Umer Farooq"""
lowercase : int = """MIT"""
lowercase : Dict = """1.0.0"""
lowercase : Optional[int] = """Muhammad Umer Farooq"""
lowercase : Optional[Any] = """contact@muhammadumerfarooq.me"""
lowercase : Optional[Any] = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class a__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] , A_ : str ) -> None:
"""simple docstring"""
super().__init__()
lowerCamelCase_: list[str] = []
lowerCamelCase_: str = domain
def lowerCAmelCase ( self : Dict , A_ : str , A_ : list[tuple[str, str | None]] ) -> None:
"""simple docstring"""
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
lowerCamelCase_: Tuple = parse.urljoin(self.domain , A_ )
self.urls.append(A_ )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return ".".join(get_sub_domain_name(_UpperCAmelCase ).split(""".""" )[-2:] )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return parse.urlparse(_UpperCAmelCase ).netloc
def UpperCAmelCase_ ( _UpperCAmelCase = "https://github.com" ):
lowerCamelCase_: List[str] = get_domain_name(_UpperCAmelCase )
# Initialize the parser
lowerCamelCase_: Union[str, Any] = Parser(_UpperCAmelCase )
try:
# Open URL
lowerCamelCase_: Tuple = requests.get(_UpperCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
lowerCamelCase_: Union[str, Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
lowerCamelCase_: Any = requests.get(_UpperCAmelCase )
# Get the valid email.
lowerCamelCase_: Any = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_UpperCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_UpperCAmelCase )
if __name__ == "__main__":
lowercase : int = emails_from_url("""https://github.com""")
print(F"{len(emails)} emails found:")
print("""\n""".join(sorted(emails)))
| 721
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = DistilBertTokenizer
_A = DistilBertTokenizerFast
_A = True
@slow
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_: Any = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
lowerCamelCase_: str = tokenizer.encode("""sequence builders""" , add_special_tokens=A_ )
lowerCamelCase_: List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A_ )
lowerCamelCase_: int = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 584
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ :List[str] = logging.get_logger(__name__)
def A_ ( snake_case__ , snake_case__=False ) -> Optional[int]:
_UpperCamelCase :List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_UpperCamelCase :Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def A_ ( snake_case__ , snake_case__ , snake_case__=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCamelCase :str = ''''''
else:
_UpperCamelCase :Dict = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase :Any = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
_UpperCamelCase :List[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase :Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
_UpperCamelCase :Union[str, Any] = in_proj_bias[: config.hidden_size]
_UpperCamelCase :str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase :Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase :int = in_proj_weight[
-config.hidden_size :, :
]
_UpperCamelCase :Optional[Any] = in_proj_bias[-config.hidden_size :]
def A_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
_UpperCamelCase :Dict = dct.pop(lowerCAmelCase_ )
_UpperCamelCase :Optional[Any] = val
def A_ ( ) -> List[Any]:
_UpperCamelCase :str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase :Optional[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def A_ ( snake_case__ , snake_case__ ) -> Dict:
_UpperCamelCase :Tuple = DeiTConfig()
# all deit models have fine-tuned heads
_UpperCamelCase :Optional[Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_UpperCamelCase :Optional[Any] = 10_00
_UpperCamelCase :List[Any] = '''huggingface/label-files'''
_UpperCamelCase :Optional[int] = '''imagenet-1k-id2label.json'''
_UpperCamelCase :Optional[Any] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
_UpperCamelCase :int = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCamelCase :List[str] = idalabel
_UpperCamelCase :List[str] = {v: k for k, v in idalabel.items()}
_UpperCamelCase :Union[str, Any] = int(deit_name[-6:-4] )
_UpperCamelCase :str = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
_UpperCamelCase :Optional[int] = 1_92
_UpperCamelCase :Any = 7_68
_UpperCamelCase :List[Any] = 12
_UpperCamelCase :int = 3
elif deit_name[9:].startswith('''small''' ):
_UpperCamelCase :Any = 3_84
_UpperCamelCase :Any = 15_36
_UpperCamelCase :Optional[int] = 12
_UpperCamelCase :Tuple = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
_UpperCamelCase :str = 10_24
_UpperCamelCase :Union[str, Any] = 40_96
_UpperCamelCase :Optional[Any] = 24
_UpperCamelCase :Any = 16
# load original model from timm
_UpperCamelCase :Optional[Any] = timm.create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCamelCase :List[str] = timm_model.state_dict()
_UpperCamelCase :Tuple = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
_UpperCamelCase :str = DeiTForImageClassificationWithTeacher(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by DeiTImageProcessor
_UpperCamelCase :Dict = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_UpperCamelCase :Union[str, Any] = DeiTImageProcessor(size=lowerCAmelCase_ , crop_size=config.image_size )
_UpperCamelCase :List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
_UpperCamelCase :Any = encoding['''pixel_values''']
_UpperCamelCase :str = model(lowerCAmelCase_ )
_UpperCamelCase :Tuple = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
UpperCamelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase__ :Optional[int] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 355
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def __UpperCAmelCase ( __lowerCamelCase : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError()
| 103
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCAmelCase ( ) -> Optional[Any]:
__UpperCAmelCase = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
__UpperCAmelCase = Image.open(requests.get(A_ , stream=A_ ).raw ).convert("RGB" )
return image
def __lowerCAmelCase ( A_ : List[Any] ) -> List[str]:
__UpperCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def __lowerCAmelCase ( A_ : Optional[int] , A_ : int , A_ : str ) -> List[Any]:
__UpperCAmelCase = dct.pop(A_ )
__UpperCAmelCase = val
def __lowerCAmelCase ( A_ : Optional[int] , A_ : Optional[int] ) -> int:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(A_ , requires_grad=A_ ), v_bias) )
__UpperCAmelCase = qkv_bias
def __lowerCAmelCase ( A_ : Any ) -> int:
__UpperCAmelCase = 3_64 if "coco" in model_name else 2_24
__UpperCAmelCase = InstructBlipVisionConfig(image_size=A_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
__UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
__UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
__UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=3_20_01 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
__UpperCAmelCase = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
__UpperCAmelCase = InstructBlipConfig(vision_config=A_ , text_config=A_ , qformer_config=A_ )
return config, image_size
@torch.no_grad()
def __lowerCAmelCase ( A_ : int , A_ : Union[str, Any]=None , A_ : Optional[Any]=False ) -> Dict:
__UpperCAmelCase = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
__UpperCAmelCase = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__UpperCAmelCase = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
__UpperCAmelCase , __UpperCAmelCase = get_blipa_config(A_ )
__UpperCAmelCase = InstructBlipForConditionalGeneration(A_ ).eval()
__UpperCAmelCase = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
__UpperCAmelCase , __UpperCAmelCase = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
__UpperCAmelCase = "cuda:1" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase = "cuda:2" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = load_model_and_preprocess(
name=A_ , model_type=A_ , is_eval=A_ , device=A_ )
original_model.eval()
print("Done!" )
# update state dict keys
__UpperCAmelCase = original_model.state_dict()
__UpperCAmelCase = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_ , A_ , A_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(A_ )
if key.startswith("Qformer.bert" ):
__UpperCAmelCase = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
__UpperCAmelCase = key.replace("self" , "attention" )
if "llm_proj" in key:
__UpperCAmelCase = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
__UpperCAmelCase = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
__UpperCAmelCase = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
__UpperCAmelCase = key.replace("t5" , "language" )
__UpperCAmelCase = val
# read in qv biases
read_in_q_v_bias(A_ , A_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(A_ , strict=A_ )
__UpperCAmelCase = load_demo_image()
__UpperCAmelCase = "What is unusual about this image?"
# create processor
__UpperCAmelCase = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=A_ , image_std=A_ )
__UpperCAmelCase = InstructBlipProcessor(
image_processor=A_ , tokenizer=A_ , qformer_tokenizer=A_ , )
__UpperCAmelCase = processor(images=A_ , text=A_ , return_tensors="pt" ).to(A_ )
# make sure processor creates exact same pixel values
__UpperCAmelCase = vis_processors["eval"](A_ ).unsqueeze(0 ).to(A_ )
__UpperCAmelCase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A_ )
original_model.to(A_ )
hf_model.to(A_ )
with torch.no_grad():
if "vicuna" in model_name:
__UpperCAmelCase = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
__UpperCAmelCase = hf_model(**A_ ).logits
else:
__UpperCAmelCase = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
__UpperCAmelCase = tokenizer("\n" , return_tensors="pt" ).input_ids.to(A_ )
__UpperCAmelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCAmelCase = hf_model(**A_ , labels=A_ ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__UpperCAmelCase = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , A_ , atol=A_ )
print("Looks ok!" )
print("Generating with original model..." )
__UpperCAmelCase = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
__UpperCAmelCase = hf_model.generate(
**A_ , do_sample=A_ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__UpperCAmelCase = 2
print("Original generation:" , A_ )
__UpperCAmelCase = processor.batch_decode(A_ , skip_special_tokens=A_ )
__UpperCAmelCase = [text.strip() for text in output_text]
print("HF generation:" , A_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
a_ = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
a_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 712
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCAmelCase ( ) -> Optional[Any]:
__UpperCAmelCase = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
__UpperCAmelCase = Image.open(requests.get(A_ , stream=A_ ).raw ).convert("RGB" )
return image
def __lowerCAmelCase ( A_ : List[Any] ) -> List[str]:
__UpperCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def __lowerCAmelCase ( A_ : Optional[int] , A_ : int , A_ : str ) -> List[Any]:
__UpperCAmelCase = dct.pop(A_ )
__UpperCAmelCase = val
def __lowerCAmelCase ( A_ : Optional[int] , A_ : Optional[int] ) -> int:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(A_ , requires_grad=A_ ), v_bias) )
__UpperCAmelCase = qkv_bias
def __lowerCAmelCase ( A_ : Any ) -> int:
__UpperCAmelCase = 3_64 if "coco" in model_name else 2_24
__UpperCAmelCase = InstructBlipVisionConfig(image_size=A_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
__UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
__UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
__UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=3_20_01 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
__UpperCAmelCase = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
__UpperCAmelCase = InstructBlipConfig(vision_config=A_ , text_config=A_ , qformer_config=A_ )
return config, image_size
@torch.no_grad()
def __lowerCAmelCase ( A_ : int , A_ : Union[str, Any]=None , A_ : Optional[Any]=False ) -> Dict:
__UpperCAmelCase = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
__UpperCAmelCase = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__UpperCAmelCase = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
__UpperCAmelCase , __UpperCAmelCase = get_blipa_config(A_ )
__UpperCAmelCase = InstructBlipForConditionalGeneration(A_ ).eval()
__UpperCAmelCase = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
__UpperCAmelCase , __UpperCAmelCase = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
__UpperCAmelCase = "cuda:1" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase = "cuda:2" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = load_model_and_preprocess(
name=A_ , model_type=A_ , is_eval=A_ , device=A_ )
original_model.eval()
print("Done!" )
# update state dict keys
__UpperCAmelCase = original_model.state_dict()
__UpperCAmelCase = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_ , A_ , A_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(A_ )
if key.startswith("Qformer.bert" ):
__UpperCAmelCase = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
__UpperCAmelCase = key.replace("self" , "attention" )
if "llm_proj" in key:
__UpperCAmelCase = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
__UpperCAmelCase = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
__UpperCAmelCase = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
__UpperCAmelCase = key.replace("t5" , "language" )
__UpperCAmelCase = val
# read in qv biases
read_in_q_v_bias(A_ , A_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(A_ , strict=A_ )
__UpperCAmelCase = load_demo_image()
__UpperCAmelCase = "What is unusual about this image?"
# create processor
__UpperCAmelCase = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=A_ , image_std=A_ )
__UpperCAmelCase = InstructBlipProcessor(
image_processor=A_ , tokenizer=A_ , qformer_tokenizer=A_ , )
__UpperCAmelCase = processor(images=A_ , text=A_ , return_tensors="pt" ).to(A_ )
# make sure processor creates exact same pixel values
__UpperCAmelCase = vis_processors["eval"](A_ ).unsqueeze(0 ).to(A_ )
__UpperCAmelCase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A_ )
original_model.to(A_ )
hf_model.to(A_ )
with torch.no_grad():
if "vicuna" in model_name:
__UpperCAmelCase = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
__UpperCAmelCase = hf_model(**A_ ).logits
else:
__UpperCAmelCase = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
__UpperCAmelCase = tokenizer("\n" , return_tensors="pt" ).input_ids.to(A_ )
__UpperCAmelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCAmelCase = hf_model(**A_ , labels=A_ ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__UpperCAmelCase = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , A_ , atol=A_ )
print("Looks ok!" )
print("Generating with original model..." )
__UpperCAmelCase = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
__UpperCAmelCase = hf_model.generate(
**A_ , do_sample=A_ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__UpperCAmelCase = 2
print("Original generation:" , A_ )
__UpperCAmelCase = processor.batch_decode(A_ , skip_special_tokens=A_ )
__UpperCAmelCase = [text.strip() for text in output_text]
print("HF generation:" , A_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
a_ = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
a_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 286
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = CycleDiffusionPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'}
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
UpperCAmelCase__ : List[str] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase__ : int = CLIPTextModel(_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase__ : Dict = image / 2 + 0.5
if str(_lowerCAmelCase ).startswith("""mps""" ):
UpperCAmelCase__ : str = torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase__ : Union[str, Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase__ : Tuple = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : int = self.get_dummy_components()
UpperCAmelCase__ : List[str] = CycleDiffusionPipeline(**_lowerCAmelCase )
UpperCAmelCase__ : Tuple = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = pipe(**_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = output.images
UpperCAmelCase__ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , """half""" ):
UpperCAmelCase__ : Dict = module.half()
UpperCAmelCase__ : Optional[int] = CycleDiffusionPipeline(**_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase__ : List[str] = self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = pipe(**_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = output.images
UpperCAmelCase__ : Optional[int] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __UpperCAmelCase ( self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def __UpperCAmelCase ( self ):
return super().test_inference_batch_single_identical()
@skip_mps
def __UpperCAmelCase ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCAmelCase ( self ):
return super().test_save_load_optional_components()
@skip_mps
def __UpperCAmelCase ( self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
UpperCAmelCase__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
UpperCAmelCase__ : Any = init_image.resize((512, 512) )
UpperCAmelCase__ : int = """CompVis/stable-diffusion-v1-4"""
UpperCAmelCase__ : Optional[int] = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
UpperCAmelCase__ : Tuple = CycleDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase__ : int = """A black colored car"""
UpperCAmelCase__ : Union[str, Any] = """A blue colored car"""
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
UpperCAmelCase__ : Optional[Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
UpperCAmelCase__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
UpperCAmelCase__ : Optional[int] = init_image.resize((512, 512) )
UpperCAmelCase__ : List[Any] = """CompVis/stable-diffusion-v1-4"""
UpperCAmelCase__ : Dict = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
UpperCAmelCase__ : Dict = CycleDiffusionPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase__ : List[Any] = """A black colored car"""
UpperCAmelCase__ : Any = """A blue colored car"""
UpperCAmelCase__ : List[str] = torch.manual_seed(0 )
UpperCAmelCase__ : Dict = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
UpperCAmelCase__ : Optional[int] = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 79
|
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 1_1:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 1_3:
        raise ValueError('Month must be between 1 - 12')
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 3_2:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 4_5 < y < 8_5_0_0:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 1_2
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = f'Your date {date_input}, is a {days[str(f)]}!'
    return response
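# Example (added comment, not in the original file): zeller("01-01-2000") returns
# "Your date 01-01-2000, is a Saturday!", since January 1st 2000 fell on a Saturday.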
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Any = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
snake_case : str = parser.parse_args()
zeller(args.date_input)
| 335
| 0
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the integer right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1_0_0_0) -> int:
    """Return the perimeter <= max_perimeter that admits the largest number of right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
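# Sanity check (added comment): with max_perimeter=12 the only integer right triangle is
# (3, 4, 5), so solution(12) returns the perimeter 12.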
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
| 353
|
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0) ... C(upper_limit) using dynamic programming."""
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
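# Sanity check (added comment): catalan_numbers(5) -> [1, 1, 2, 5, 14, 42].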
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 353
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
UpperCamelCase_ : str = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def A_ (__a , __a=None , __a=None , __a=None ):
'''simple docstring'''
A_ = True
while ask_again:
A_ = input(__a )
try:
if default is not None and len(__a ) == 0:
return default
return convert_value(__a ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__a )
def A_ (__a , __a=[] , __a=None , __a=0 ):
'''simple docstring'''
A_ = BulletMenu(__a , __a )
A_ = menu.run(default_choice=__a )
return convert_value(__a ) if convert_value is not None else result
def A_ (__a ):
'''simple docstring'''
A_ = int(__a )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def A_ (__a ):
'''simple docstring'''
A_ = int(__a )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def A_ (__a ):
'''simple docstring'''
A_ = int(__a )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A_ (__a ):
'''simple docstring'''
A_ = int(__a )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def A_ (__a ):
'''simple docstring'''
A_ = int(__a )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def A_ (__a ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __lowerCAmelCase ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def lowerCamelCase__ ( self : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Any ) -> List[Any]:
"""simple docstring"""
A_ = super()._format_usage(_snake_case , _snake_case , _snake_case , _snake_case )
A_ = usage.replace("<command> [<args>] " , "" )
return usage
| 115
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A_ (__a , __a , __a , __a , __a=True , __a="pt" ):
'''simple docstring'''
A_ = {"add_prefix_space": True} if isinstance(__a , __a ) and not line.startswith(" " ) else {}
A_ = padding_side
return tokenizer(
[line] , max_length=__a , padding="max_length" if pad_to_max_length else None , truncation=__a , return_tensors=__a , add_special_tokens=__a , **__a , )
def A_ (__a , __a , __a=None , ):
'''simple docstring'''
A_ = input_ids.ne(__a ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : int , _snake_case : Tuple , _snake_case : Dict="train" , _snake_case : List[Any]=None , _snake_case : Optional[int]=None , _snake_case : Optional[int]=None , _snake_case : Any="" , ) -> List[str]:
"""simple docstring"""
super().__init__()
A_ = Path(_snake_case ).joinpath(type_path + ".source" )
A_ = Path(_snake_case ).joinpath(type_path + ".target" )
A_ = self.get_char_lens(self.src_file )
A_ = max_source_length
A_ = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
A_ = tokenizer
A_ = prefix
if n_obs is not None:
A_ = self.src_lens[:n_obs]
A_ = src_lang
A_ = tgt_lang
def __len__( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : str , _snake_case : Optional[int] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
A_ = index + 1 # linecache starts at 1
A_ = self.prefix + linecache.getline(str(self.src_file ) , _snake_case ).rstrip("\n" )
A_ = linecache.getline(str(self.tgt_file ) , _snake_case ).rstrip("\n" )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _snake_case ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _snake_case ) else self.tokenizer
)
A_ = self.tokenizer.generator if isinstance(self.tokenizer , _snake_case ) else self.tokenizer
A_ = encode_line(_snake_case , _snake_case , self.max_source_length , "right" )
A_ = encode_line(_snake_case , _snake_case , self.max_target_length , "right" )
A_ = source_inputs["input_ids"].squeeze()
A_ = target_inputs["input_ids"].squeeze()
A_ = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowerCamelCase__ ( _snake_case : List[Any] ) -> str:
"""simple docstring"""
return [len(_snake_case ) for x in Path(_snake_case ).open().readlines()]
def lowerCamelCase__ ( self : Any , _snake_case : List[str] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
A_ = torch.stack([x["input_ids"] for x in batch] )
A_ = torch.stack([x["attention_mask"] for x in batch] )
A_ = torch.stack([x["decoder_input_ids"] for x in batch] )
A_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _snake_case )
else self.tokenizer.pad_token_id
)
A_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _snake_case )
else self.tokenizer.pad_token_id
)
A_ = trim_batch(_snake_case , _snake_case )
A_ , A_ = trim_batch(_snake_case , _snake_case , attention_mask=_snake_case )
A_ = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
UpperCamelCase_ : Any = getLogger(__name__)
def A_ (__a ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__a ) )
def A_ (__a ):
'''simple docstring'''
A_ = get_git_info()
save_json(__a , os.path.join(__a , "git_log.json" ) )
def A_ (__a , __a , __a=4 , **__a ):
'''simple docstring'''
with open(__a , "w" ) as f:
json.dump(__a , __a , indent=__a , **__a )
def A_ (__a ):
'''simple docstring'''
with open(__a ) as f:
return json.load(__a )
def A_ ():
'''simple docstring'''
A_ = git.Repo(search_parent_directories=__a )
A_ = {
"repo_id": str(__a ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def A_ (__a , __a ):
'''simple docstring'''
return list(map(__a , __a ) )
def A_ (__a , __a ):
'''simple docstring'''
with open(__a , "wb" ) as f:
return pickle.dump(__a , __a )
def A_ (__a ):
'''simple docstring'''
def remove_articles(__a ):
return re.sub(R"\b(a|an|the)\b" , " " , __a )
def white_space_fix(__a ):
return " ".join(text.split() )
def remove_punc(__a ):
A_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__a ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) )
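# Added note: the helpers above lowercase the text, strip punctuation and English articles,
# and collapse whitespace, so e.g. "The Cat's!" normalizes to "cats".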
def A_ (__a , __a ):
'''simple docstring'''
A_ = normalize_answer(__a ).split()
A_ = normalize_answer(__a ).split()
A_ = Counter(__a ) & Counter(__a )
A_ = sum(common.values() )
if num_same == 0:
return 0
A_ = 1.0 * num_same / len(__a )
A_ = 1.0 * num_same / len(__a )
A_ = (2 * precision * recall) / (precision + recall)
return fa
def A_ (__a , __a ):
'''simple docstring'''
return normalize_answer(__a ) == normalize_answer(__a )
def A_ (__a , __a ):
'''simple docstring'''
assert len(__a ) == len(__a )
A_ = 0
for hypo, pred in zip(__a , __a ):
em += exact_match_score(__a , __a )
if len(__a ) > 0:
em /= len(__a )
return {"em": em}
def A_ (__a ):
'''simple docstring'''
return model_prefix.startswith("rag" )
def A_ (__a , __a , __a ):
'''simple docstring'''
A_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ = "dropout_rate"
for p in extra_params:
if getattr(__a , __a , __a ):
if not hasattr(__a , __a ) and not hasattr(__a , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(__a ) )
delattr(__a , __a )
continue
A_ = p if hasattr(__a , __a ) else equivalent_param[p]
setattr(__a , __a , getattr(__a , __a ) )
delattr(__a , __a )
return hparams, config
| 115
| 1
|
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no counterpart in the HF XGLM model."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build an output projection (lm_head) that shares weights with the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 624
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y), y(xa) = ya, with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
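# Illustrative usage (added, not in the original file): integrate y' = y with y(0) = 1 on [0, 1];
# the final entry approaches e ≈ 2.718 as step_size shrinks.
#   y = explicit_euler(lambda x, y: y, ya=1.0, xa=0.0, step_size=0.01, x_end=1.0)
#   print(y[-1])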
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624
| 1
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCamelCase__ =version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
UpperCamelCase__ ='\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
UpperCamelCase__ ='\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
UpperCamelCase__ ='\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.9 , __lowerCamelCase=3 , __lowerCamelCase=0.5 ) -> str:
if NLTK_VERSION >= version.Version("3.6.5" ):
_SCREAMING_SNAKE_CASE : List[str] = [
meteor_score.single_meteor_score(
word_tokenize(__lowerCamelCase ) , word_tokenize(__lowerCamelCase ) , alpha=__lowerCamelCase , beta=__lowerCamelCase , gamma=__lowerCamelCase )
for ref, pred in zip(__lowerCamelCase , __lowerCamelCase )
]
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
meteor_score.single_meteor_score(__lowerCamelCase , __lowerCamelCase , alpha=__lowerCamelCase , beta=__lowerCamelCase , gamma=__lowerCamelCase )
for ref, pred in zip(__lowerCamelCase , __lowerCamelCase )
]
return {"meteor": np.mean(__lowerCamelCase )}
| 249
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the repeated exponentiation of base, iterated height times."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
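# Quick check (added comment): _modexpt(3, 5, 100) == 43, since 3**5 = 243 and 243 % 100 = 43.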
if __name__ == "__main__":
print(f"{solution() = }")
| 249
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 708
|
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 300
| 0
|
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all files, two at a time (optimal merge pattern)."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
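# Worked example (added comment): optimal_merge_pattern([2, 3, 4]) merges 2 + 3 (cost 5),
# then 5 + 4 (cost 9), giving a total cost of 14.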
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
|
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
# Initialise PyTorch model
_lowerCAmelCase = BigBirdConfig.from_json_file(__lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
_lowerCAmelCase = BigBirdForQuestionAnswering(__lowerCamelCase )
else:
_lowerCAmelCase = BigBirdForPreTraining(__lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__lowerCamelCase, __lowerCamelCase, is_trivia_qa=__lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
a__ : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 589
| 0
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase_ : List[str] = logging.get_logger(__name__)
UpperCamelCase_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ : List[Any] = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase_ : Dict = {
"""Salesforce/codegen-350M-mono""": 2_048,
}
class lowerCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ = CodeGenTokenizer
def __init__( self : Optional[Any] ,a__ : str=None ,a__ : Dict=None ,a__ : str=None ,a__ : List[str]="<|endoftext|>" ,a__ : Any="<|endoftext|>" ,a__ : Dict="<|endoftext|>" ,a__ : List[Any]=False ,**a__ : Any ,):
super().__init__(
a__ ,a__ ,tokenizer_file=a__ ,unk_token=a__ ,bos_token=a__ ,eos_token=a__ ,add_prefix_space=a__ ,**a__ ,)
if kwargs.pop("add_bos_token" ,a__ ):
a__ = kwargs.pop("name_or_path" ,"" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
a__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" ,a__ ) != add_prefix_space:
a__ = getattr(a__ ,pre_tok_state.pop("type" ) )
a__ = add_prefix_space
a__ = pre_tok_class(**a__ )
a__ = add_prefix_space
def lowerCAmelCase_ ( self : Dict ,*a__ : List[Any] ,**a__ : int ):
a__ = kwargs.get("is_split_into_words" ,a__ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a__ ,**a__ )
def lowerCAmelCase_ ( self : Union[str, Any] ,*a__ : int ,**a__ : Optional[Any] ):
a__ = kwargs.get("is_split_into_words" ,a__ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a__ ,**a__ )
def lowerCAmelCase_ ( self : Optional[Any] ,a__ : str ,a__ : Optional[str] = None ):
a__ = self._tokenizer.model.save(a__ ,name=a__ )
return tuple(a__ )
def lowerCAmelCase_ ( self : List[Any] ,a__ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] ,a__ : bool = False ,a__ : bool = None ,a__ : Optional[List[str]] = None ,**a__ : Any ,):
a__ = super().decode(
token_ids=a__ ,skip_special_tokens=a__ ,clean_up_tokenization_spaces=a__ ,**a__ ,)
if truncate_before_pattern is not None and len(a__ ) > 0:
a__ = self.truncate(a__ ,a__ )
return decoded_text
def lowerCAmelCase_ ( self : List[Any] ,a__ : Dict ,a__ : List[Any] ):
def find_re(a__ : List[str] ,a__ : Tuple ,a__ : Tuple ):
a__ = pattern.search(a__ ,a__ )
return m.start() if m else -1
a__ = [re.compile(a__ ,re.MULTILINE ) for pattern in truncate_before_pattern]
a__ = list(re.finditer("^print" ,a__ ,re.MULTILINE ) )
if len(a__ ) > 1:
a__ = completion[: prints[1].start()]
a__ = list(re.finditer("^def" ,a__ ,re.MULTILINE ) )
if len(a__ ) > 1:
a__ = completion[: defs[1].start()]
a__ = 0
a__ = [
pos for pos in [find_re(a__ ,a__ ,a__ ) for terminal in terminals] if pos != -1
]
if len(a__ ) > 0:
return completion[: min(a__ )]
else:
return completion
| 394
|
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
if not is_accelerate_available():
return method
a__ = version.parse(accelerate.__version__ ).base_version
if version.parse(_lowercase ) < version.parse("0.17.0" ):
return method
def wrapper(self , *_lowercase , **_lowercase ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *_lowercase , **_lowercase )
return wrapper
| 394
| 1
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCamelCase__ ( a_):
"""simple docstring"""
__UpperCAmelCase = ["""image_processor""", """tokenizer"""]
__UpperCAmelCase = """AutoImageProcessor"""
__UpperCAmelCase = """AutoTokenizer"""
def __init__( self : Optional[int] , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : int ):
'''simple docstring'''
__magic_name__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase_ , )
__magic_name__ = kwargs.pop('feature_extractor' )
__magic_name__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = self.image_processor
__magic_name__ = False
def __call__( self : List[str] , *UpperCamelCase_ : str , **UpperCamelCase_ : str ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase_ , **UpperCamelCase_ )
__magic_name__ = kwargs.pop('images' , UpperCamelCase_ )
__magic_name__ = kwargs.pop('text' , UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
__magic_name__ = args[0]
__magic_name__ = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__magic_name__ = self.image_processor(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
if text is not None:
__magic_name__ = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__magic_name__ = encodings['input_ids']
return inputs
def a__ ( self : str , *UpperCamelCase_ : Dict , **UpperCamelCase_ : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def a__ ( self : Any , *UpperCamelCase_ : str , **UpperCamelCase_ : int ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@contextmanager
def a__ ( self : int ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__magic_name__ = True
__magic_name__ = self.tokenizer
yield
__magic_name__ = self.image_processor
__magic_name__ = False
def a__ ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Optional[int]=None ):
'''simple docstring'''
if added_vocab is None:
__magic_name__ = self.tokenizer.get_added_vocab()
__magic_name__ = {}
while tokens:
__magic_name__ = re.search(r'<s_(.*?)>' , UpperCamelCase_ , re.IGNORECASE )
if start_token is None:
break
__magic_name__ = start_token.group(1 )
__magic_name__ = re.search(rf"""</s_{key}>""" , UpperCamelCase_ , re.IGNORECASE )
__magic_name__ = start_token.group()
if end_token is None:
__magic_name__ = tokens.replace(UpperCamelCase_ , '' )
else:
__magic_name__ = end_token.group()
__magic_name__ = re.escape(UpperCamelCase_ )
__magic_name__ = re.escape(UpperCamelCase_ )
__magic_name__ = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCamelCase_ , re.IGNORECASE )
if content is not None:
__magic_name__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__magic_name__ = self.tokenajson(UpperCamelCase_ , is_inner_value=UpperCamelCase_ , added_vocab=UpperCamelCase_ )
if value:
if len(UpperCamelCase_ ) == 1:
__magic_name__ = value[0]
__magic_name__ = value
else: # leaf nodes
__magic_name__ = []
for leaf in content.split(r'<sep/>' ):
__magic_name__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__magic_name__ = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCamelCase_ )
if len(output[key] ) == 1:
__magic_name__ = output[key][0]
__magic_name__ = tokens[tokens.find(UpperCamelCase_ ) + len(UpperCamelCase_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCamelCase_ , added_vocab=UpperCamelCase_ )
if len(UpperCamelCase_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def a__ ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase_ , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase_ , )
return self.image_processor
| 545
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( __snake_case: Optional[int] ) -> Tuple:
"""simple docstring"""
for param in module.parameters():
__magic_name__ = False
def A ( ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__magic_name__ = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def A ( __snake_case: int ) -> List[Any]:
"""simple docstring"""
__magic_name__ = plt.imshow(__snake_case )
fig.axes.get_xaxis().set_visible(__snake_case )
fig.axes.get_yaxis().set_visible(__snake_case )
plt.show()
def A ( ) -> List[Any]:
"""simple docstring"""
__magic_name__ = datetime.now()
__magic_name__ = current_time.strftime('%H:%M:%S' )
return timestamp
| 545
| 1
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : int = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : Optional[int] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : int = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
SCREAMING_SNAKE_CASE : List[str] = "3.0.12"
SCREAMING_SNAKE_CASE : str = None
def __A ( ):
"""simple docstring"""
global _logger
__a = _logger or logging.getLogger(__name__ )
return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by :meth:`BaseFileLock.acquire`."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 703
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int,
        num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff,
            dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask for the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 525
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for whichever of voltage, current or power is given as 0, using P = V * I."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
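# Worked example for electric_power above: a 5 V source driving 2 A dissipates
# P = V * I = 10 W; passing 0 for one argument asks the function to solve for it.
if __name__ == "__main__":
    print(electric_power(voltage=0, current=2, power=10))  # result(name='voltage', value=5.0)
    print(electric_power(voltage=5, current=2, power=0))   # result(name='power', value=10.0)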
| 81
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
a__ = logging.get_logger(__name__)
a__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a__ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
a__ = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
a__ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = VOCAB_FILES_NAMES
snake_case_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Any = PRETRAINED_INIT_CONFIGURATION
snake_case_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : Dict = RealmTokenizer
def __init__( self : int , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]="[UNK]" , lowerCAmelCase : List[str]="[SEP]" , lowerCAmelCase : Optional[int]="[PAD]" , lowerCAmelCase : List[Any]="[CLS]" , lowerCAmelCase : Any="[MASK]" , lowerCAmelCase : Dict=True , lowerCAmelCase : int=None , **lowerCAmelCase : Tuple , ) -> List[str]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
_snake_case : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase) != tokenize_chinese_chars
):
_snake_case : Tuple = getattr(lowerCAmelCase , normalizer_state.pop("""type"""))
_snake_case : Any = do_lower_case
_snake_case : Optional[int] = strip_accents
_snake_case : str = tokenize_chinese_chars
_snake_case : List[str] = normalizer_class(**lowerCAmelCase)
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[str] , **lowerCAmelCase : Tuple) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Dict = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[Any] = kwargs.pop("""text_pair""" , lowerCAmelCase)
_snake_case : Union[str, Any] = kwargs.pop("""return_tensors""" , lowerCAmelCase)
_snake_case : Optional[Any] = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(lowerCAmelCase):
if batch_text_pair is not None:
_snake_case : Dict = batch_text_pair[idx]
else:
_snake_case : List[str] = None
_snake_case : Optional[int] = super().__call__(lowerCAmelCase , lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
_snake_case : str = encoded_candidates.get("""input_ids""")
_snake_case : Union[str, Any] = encoded_candidates.get("""attention_mask""")
_snake_case : Any = encoded_candidates.get("""token_type_ids""")
if encoded_input_ids is not None:
output_data["input_ids"].append(lowerCAmelCase)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowerCAmelCase)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowerCAmelCase)
_snake_case : str = {key: item for key, item in output_data.items() if len(lowerCAmelCase) != 0}
return BatchEncoding(lowerCAmelCase , tensor_type=lowerCAmelCase)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=None) -> List[str]:
"""simple docstring"""
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_snake_case : Dict = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
| 477
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 716
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( ProcessorMixin ):
"""simple docstring"""
UpperCAmelCase__ = ["image_processor", "tokenizer"]
UpperCAmelCase__ = "AutoImageProcessor"
UpperCAmelCase__ = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
        # default to the image processor as the active processor
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def __lowercase( self ) -> List[Any]:
return ["input_ids", "attention_mask", "pixel_values"]
| 567
| 0
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( __snake_case , unittest.TestCase ):
UpperCamelCase = VideoToVideoSDPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase = False
# No `output_type`.
UpperCamelCase = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=3_2 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
UpperCAmelCase = CLIPTextModel(__lowerCamelCase )
UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def _lowercase ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=0 ) -> Dict:
"""simple docstring"""
UpperCAmelCase = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCAmelCase = torch.manual_seed(__lowerCamelCase )
else:
UpperCAmelCase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**__lowerCamelCase )
UpperCAmelCase = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCAmelCase = self.get_dummy_inputs(__lowerCamelCase )
UpperCAmelCase = """np"""
UpperCAmelCase = sd_pipe(**__lowerCamelCase ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
UpperCAmelCase = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCamelCase , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowercase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _lowercase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class __lowercase ( unittest.TestCase ):
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=__lowerCamelCase )
UpperCAmelCase = video.to("""cuda""" )
UpperCAmelCase = """Spiderman is surfing"""
UpperCAmelCase = pipe(__lowerCamelCase , video=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=3 , output_type="""pt""" ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 377
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 377
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value of a complete binary game tree for the player to move."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print(f'Optimal value : {minimax(0, 0, True, scores, height)}')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
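# Small worked example for the minimax above, using a complete binary tree of
# height 2 (four leaf scores, chosen arbitrarily). The maximizer moves first,
# so the optimal value is max(min(3, 5), min(2, 9)) = 3.
if __name__ == "__main__":
    example_scores = [3, 5, 2, 9]
    print(minimax(0, 0, True, example_scores, math.log(len(example_scores), 2)))  # 3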
| 709
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Union[str, Any] = 16
lowerCAmelCase : Any = 32
def A_( A : Accelerator , A : int = 16):
UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-cased')
UpperCamelCase = load_dataset('glue' , 'mrpc')
def tokenize_function(A : Dict):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A , max_length=A)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
A , batched=A , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(A : int):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
A , padding='longest' , max_length=A , pad_to_multiple_of=A , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=A , collate_fn=A , batch_size=A)
UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=A , collate_fn=A , batch_size=A)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : int = mocked_dataloaders # noqa: F811
def A_( A : List[str] , A : Dict):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , A) == "1":
UpperCamelCase = 2
# New Code #
UpperCamelCase = int(args.gradient_accumulation_steps)
# Initialize accelerator
UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=A)
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`')
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config['lr']
UpperCamelCase = int(config['num_epochs'])
UpperCamelCase = int(config['seed'])
UpperCamelCase = int(config['batch_size'])
UpperCamelCase = evaluate.load('glue' , 'mrpc')
set_seed(A)
UpperCamelCase , UpperCamelCase = get_dataloaders(A , A)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device)
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=A)
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=100 , num_training_steps=(len(A) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
A , A , A , A , A)
# Now we train the model
for epoch in range(A):
model.train()
for step, batch in enumerate(A):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(A):
UpperCamelCase = model(**A)
UpperCamelCase = output.loss
accelerator.backward(A)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
UpperCamelCase = model(**A)
UpperCamelCase = outputs.logits.argmax(dim=-1)
UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(
predictions=A , references=A , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A)
def A_( ):
UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
'--mixed_precision' , type=A , default=A , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=A , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
UpperCamelCase = parser.parse_args()
UpperCamelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A , A)
if __name__ == "__main__":
main()
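# For reference, a small self-contained sketch of the manual gradient accumulation that
# `accelerator.accumulate(model)` takes care of: scale each loss by the accumulation factor
# and only step/zero the optimizer every `accumulation_steps` mini-batches. The tiny model
# and random batches below are illustrative only and unrelated to the GLUE example above.
def manual_accumulation_sketch(accumulation_steps: int = 4):
    tiny_model = torch.nn.Linear(8, 1)
    optimizer = torch.optim.SGD(tiny_model.parameters(), lr=0.1)
    batches = [torch.randn(2, 8) for _ in range(2 * accumulation_steps)]
    for step, batch in enumerate(batches):
        loss = tiny_model(batch).pow(2).mean() / accumulation_steps
        loss.backward()  # gradients keep adding up across the accumulation window
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()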
| 432
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
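# The block above only fills `_import_structure` at import time; the heavy submodules are
# imported lazily on first attribute access. A rough, self-contained sketch of the idea
# behind `_LazyModule` (not the actual implementation), added here for illustration:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every public name to the submodule that defines it
        self._class_to_module = {
            cls_name: mod for mod, classes in import_structure.items() for cls_name in classes
        }

    def __getattr__(self, attr):
        # import the owning submodule only when the attribute is first requested
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)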
| 90
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 90
| 1
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( __a , __a ):
'''simple docstring'''
@register_to_config
def __init__( self : str , *,
__lowercase : int = 4 , __lowercase : int = 7_68 , __lowercase : int , __lowercase : str , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(a_ ) )
# parameters for additional clip time embeddings
UpperCAmelCase_ = nn.Linear(a_ , a_ )
UpperCAmelCase_ = nn.Linear(a_ , a_ )
# parameters for encoder hidden states
UpperCAmelCase_ = clip_extra_context_tokens
UpperCAmelCase_ = nn.Linear(
a_ , self.clip_extra_context_tokens * cross_attention_dim )
UpperCAmelCase_ = nn.Linear(a_ , a_ )
UpperCAmelCase_ = nn.LayerNorm(a_ )
def SCREAMING_SNAKE_CASE ( self : Any , *, __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCAmelCase_ = image_embeddings.shape[0]
UpperCAmelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCAmelCase_ = classifier_free_guidance_embeddings.expand(
a_ , -1 )
UpperCAmelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCAmelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCAmelCase_ = self.embedding_proj(a_ )
UpperCAmelCase_ = self.clip_image_embeddings_project_to_time_embeddings(a_ )
UpperCAmelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCAmelCase_ = self.clip_extra_context_tokens_proj(a_ )
UpperCAmelCase_ = clip_extra_context_tokens.reshape(a_ , -1 , self.clip_extra_context_tokens )
UpperCAmelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCAmelCase_ = self.encoder_hidden_states_proj(a_ )
UpperCAmelCase_ = self.text_encoder_hidden_states_norm(a_ )
UpperCAmelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 706
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _UpperCamelCase ( A_ ):
'''simple docstring'''
lowerCamelCase : Union[List[PIL.Image.Image], np.ndarray]
lowerCamelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class _UpperCamelCase ( A_ ):
'''simple docstring'''
lowerCamelCase : np.ndarray
lowerCamelCase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 486
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
        'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MaskFormerForInstanceSegmentation',
        'MaskFormerModel',
        'MaskFormerPreTrainedModel',
    ]
    _import_structure['modeling_maskformer_swin'] = [
        'MaskFormerSwinBackbone',
        'MaskFormerSwinModel',
        'MaskFormerSwinPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 94
|
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
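# Quick sanity check for the sieve above on a small limit: the reduced proper fractions
# with denominator d <= 10 number phi(2) + phi(3) + ... + phi(10) = 1+2+2+4+2+6+4+6+4 = 31.
if __name__ == "__main__":
    print(solution(10))  # 31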
| 300
| 0
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element of the input array into the range (0, 1)."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
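# Quick usage example for the sigmoid above: values map into (0, 1), with 0 mapping to 0.5.
if __name__ == "__main__":
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # approximately [0.26894142 0.5 0.73105858]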
| 385
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 385
| 1
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None) -> Any:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 539
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE ( a_ : str , a_ : Union[str, Any] , a_ : Dict ):
__a = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__a = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
__a = f"{src_lang}-{tgt_lang}"
__a = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
readme = __a  # the model card text generated above
os.makedirs(model_card_dir, exist_ok=True)
path = os.path.join(model_card_dir, "README.md")
print(f"Generating {path}")
with open(path, "w", encoding="utf-8") as f:
f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_, src_lang, tgt_lang = model_name.split("-")
model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 539
| 1
|
import math
def malus_law(initial_intensity: float, angle: float) -> float:
# handling of negative values of initial intensity
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative")
# handling of values out of allowed range
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
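# Minimal usage sketch for malus_law (values follow directly from I = I0 * cos(theta)^2):
#   malus_law(100.0, 60.0)  # -> 25.0, since cos(60 deg)^2 = 0.25
#   malus_law(100.0, 90.0)  # -> ~0.0, crossed polarizers block the transmitted light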
| 151
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
scheduler_classes = (DPMSolverSinglestepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**kwargs)
return config
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple=0 , **lowerCamelCase__ : Union[str, Any] ):
a__ : Optional[Any] = dict(self.forward_default_kwargs )
a__ : Any = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
a__ : Any = self.dummy_sample
a__ : int = 0.1 * sample
a__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__ : Dict = self.get_scheduler_config(**lowerCamelCase__ )
a__ : Any = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
a__ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
a__ : List[str] = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
a__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
a__, a__ : Any = sample, sample
for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
a__ : List[str] = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase( self : Any ):
pass
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[Any]=0 , **lowerCamelCase__ : Optional[int] ):
a__ : Tuple = dict(self.forward_default_kwargs )
a__ : Tuple = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
a__ : Union[str, Any] = self.dummy_sample
a__ : Tuple = 0.1 * sample
a__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__ : Dict = self.get_scheduler_config()
a__ : List[str] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
a__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
a__ : int = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
a__ : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
a__ : Dict = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Union[str, Any] ):
if scheduler is None:
a__ : Union[str, Any] = self.scheduler_classes[0]
a__ : Optional[Any] = self.get_scheduler_config(**lowerCamelCase__ )
a__ : str = scheduler_class(**lowerCamelCase__ )
a__ : List[Any] = self.scheduler_classes[0]
a__ : int = self.get_scheduler_config(**lowerCamelCase__ )
a__ : Any = scheduler_class(**lowerCamelCase__ )
a__ : Any = 10
a__ : int = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def _UpperCamelCase( self : str ):
a__ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : Optional[Any] = 50
a__ : List[str] = self.dummy_model()
a__ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _UpperCamelCase( self : Union[str, Any] ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a__ : Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : int = self.full_loop(scheduler=lowerCamelCase__ )
a__ : str = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
a__ : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
a__ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
a__ : int = UniPCMultistepScheduler.from_config(scheduler.config )
a__ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a__ : Optional[Any] = self.full_loop(scheduler=lowerCamelCase__ )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _UpperCamelCase( self : Union[str, Any] ):
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type="dpmsolver++" , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , )
def _UpperCamelCase( self : List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def _UpperCamelCase( self : List[str] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
a__ : Dict = self.full_loop(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def _UpperCamelCase( self : str ):
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _UpperCamelCase( self : Union[str, Any] ):
self.check_over_configs(variance_type=lowerCamelCase__ )
self.check_over_configs(variance_type="learned_range" )
def _UpperCamelCase( self : Any ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 )
def _UpperCamelCase( self : Optional[int] ):
a__ : Optional[int] = self.full_loop()
a__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _UpperCamelCase( self : str ):
a__ : List[str] = self.full_loop(use_karras_sigmas=lowerCamelCase__ )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _UpperCamelCase( self : int ):
a__ : List[Any] = self.full_loop(prediction_type="v_prediction" )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _UpperCamelCase( self : Tuple ):
a__ : Any = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=lowerCamelCase__ )
a__ : List[str] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _UpperCamelCase( self : str ):
a__ : Union[str, Any] = self.scheduler_classes[0]
a__ : List[str] = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 )
a__ : int = scheduler_class(**lowerCamelCase__ )
a__ : int = 10
a__ : int = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Dict = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
assert sample.dtype == torch.float16
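# Standalone sketch of the scheduler-swapping pattern the tests above exercise:
# any of these solvers can be rebuilt from another scheduler's config. This is
# illustrative only and not part of the test class.
if __name__ == "__main__":
    demo_scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1_000)
    swapped = UniPCMultistepScheduler.from_config(demo_scheduler.config)  # same config, different solver
    restored = DPMSolverSinglestepScheduler.from_config(swapped.config)  # round-trips back
    print(type(swapped).__name__, type(restored).__name__)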
| 151
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head"):
key = "segformer.encoder." + key
if key.startswith("backbone"):
key = key.replace("backbone", "segformer.encoder")
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
idx = key[key.find("patch_embed") + len("patch_embed")]
key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
if "norm" in key:
key = key.replace("norm", "layer_norm")
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
if "layer_norm1" in key:
key = key.replace("layer_norm1", "layer_norm_1")
if "layer_norm2" in key:
key = key.replace("layer_norm2", "layer_norm_2")
if "block" in key:
# replace for example block1 by block.0
idx = key[key.find("block") + len("block")]
key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
if "attn.q" in key:
key = key.replace("attn.q", "attention.self.query")
if "attn.proj" in key:
key = key.replace("attn.proj", "attention.output.dense")
if "attn" in key:
key = key.replace("attn", "attention.self")
if "fc1" in key:
key = key.replace("fc1", "dense1")
if "fc2" in key:
key = key.replace("fc2", "dense2")
if "linear_pred" in key:
key = key.replace("linear_pred", "classifier")
if "linear_fuse" in key:
key = key.replace("linear_fuse.conv", "linear_fuse")
key = key.replace("linear_fuse.bn", "batch_norm")
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
idx = key[key.find("linear_c") + len("linear_c")]
key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
if key.startswith("head"):
key = key.replace("head", "classifier")
new_state_dict[key] = value
return new_state_dict
def read_in_k_v(state_dict, config):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks):
for j in range(config.depths[i]):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
# next, add keys and values (in that order) to the state dict
state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
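# Toy illustration of the split above (shapes only, not real checkpoint weights):
# a fused kv projection of shape (2 * hidden_size, hidden_size) is cut into the
# key part (first hidden_size rows) and the value part (remaining rows), e.g.
#   kv_weight = torch.randn(2 * 64, 64)
#   key_w, value_w = kv_weight[:64, :], kv_weight[64:, :]  # both are (64, 64)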
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
config = SegformerConfig()
encoder_only = False
# set attributes based on model_name
repo_id = "huggingface/label-files"
if "segformer" in model_name:
_lowerCAmelCase = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
_lowerCAmelCase = 150
_lowerCAmelCase = """ade20k-id2label.json"""
_lowerCAmelCase = (1, 150, 128, 128)
elif "city" in model_name:
_lowerCAmelCase = 19
_lowerCAmelCase = """cityscapes-id2label.json"""
_lowerCAmelCase = (1, 19, 128, 128)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
_lowerCAmelCase = True
_lowerCAmelCase = model_name[4:6]
_lowerCAmelCase = 1_000
_lowerCAmelCase = """imagenet-1k-id2label.json"""
_lowerCAmelCase = (1, 1_000)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
_lowerCAmelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 256
elif size == "b2":
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 768
_lowerCAmelCase = [3, 4, 6, 3]
elif size == "b3":
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 768
_lowerCAmelCase = [3, 4, 18, 3]
elif size == "b4":
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 768
_lowerCAmelCase = [3, 8, 27, 3]
elif size == "b5":
_lowerCAmelCase = [64, 128, 320, 512]
_lowerCAmelCase = 768
_lowerCAmelCase = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
_lowerCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__lowerCAmelCase , align=__lowerCAmelCase , do_random_crop=__lowerCAmelCase )
# prepare image
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
_lowerCAmelCase = torch.load(__lowerCAmelCase , map_location=torch.device('cpu' ) )
else:
_lowerCAmelCase = torch.load(__lowerCAmelCase , map_location=torch.device('cpu' ) )["""state_dict"""]
# rename keys
_lowerCAmelCase = rename_keys(__lowerCAmelCase , encoder_only=__lowerCAmelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__lowerCAmelCase , __lowerCAmelCase )
# create HuggingFace model and load state dict
if encoder_only:
_lowerCAmelCase = False
_lowerCAmelCase = SegformerForImageClassification(__lowerCAmelCase )
else:
_lowerCAmelCase = SegformerForSemanticSegmentation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# forward pass
_lowerCAmelCase = model(__lowerCAmelCase )
_lowerCAmelCase = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_lowerCAmelCase = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_lowerCAmelCase = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_lowerCAmelCase = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_lowerCAmelCase = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_lowerCAmelCase = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_lowerCAmelCase = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_lowerCAmelCase = torch.tensor(
[
[
[-1.1_3_7_2e0_1, -1.2_7_8_7e0_1, -1.3_4_7_7e0_1],
[-1.2_5_3_6e0_1, -1.4_1_9_4e0_1, -1.4_4_0_9e0_1],
[-1.3_2_1_7e0_1, -1.4_8_8_8e0_1, -1.5_3_2_7e0_1],
],
[
[-1.4_7_9_1e0_1, -1.7_1_2_2e0_1, -1.8_2_7_7e0_1],
[-1.7_1_6_3e0_1, -1.9_1_9_2e0_1, -1.9_5_3_3e0_1],
[-1.7_8_9_7e0_1, -1.9_9_9_1e0_1, -2.0_3_1_5e0_1],
],
[
[7.6_7_2_3e-0_1, 4.1_9_2_1e-0_1, -7.7_8_7_8e-0_2],
[4.7_7_7_2e-0_1, 9.5_5_5_7e-0_3, -2.8_0_8_2e-0_1],
[3.6_0_3_2e-0_1, -2.4_8_2_6e-0_1, -5.1_1_6_8e-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_lowerCAmelCase = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
_lowerCAmelCase = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
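# Example invocation (the script filename and paths below are illustrative placeholders;
# the checkpoint must match one of the expected_slice branches handled above):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path /path/to/segformer_b0_ade.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade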
| 309
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=False ) -> Union[str, Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
__UpperCamelCase : Tuple = os.path.abspath(__lowerCAmelCase )
logger.info(f'Loading PyTorch weights from {pt_path}' )
__UpperCamelCase : int = torch.load(__lowerCAmelCase , map_location="""cpu""" )
logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__UpperCamelCase : List[Any] = convert_pytorch_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__UpperCamelCase : List[str] = convert_pytorch_sharded_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
return flax_state_dict
def rename_key_and_reshape_tensor(
pt_tuple_key: Tuple[str], pt_tensor: np.ndarray, random_flax_state_dict: Dict[str, jnp.ndarray], model_prefix: str
) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0
# layer norm
__UpperCamelCase : Optional[int] = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__UpperCamelCase : Optional[Any] = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__UpperCamelCase : Optional[Any] = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__UpperCamelCase : Union[str, Any] = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__UpperCamelCase : Tuple = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
__UpperCamelCase : Optional[int] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__UpperCamelCase : int = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
__UpperCamelCase : Optional[int] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__UpperCamelCase : Tuple = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__UpperCamelCase : str = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__UpperCamelCase : Optional[int] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__UpperCamelCase : Tuple = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__UpperCamelCase : Dict = pt_tuple_key[-2] + """_v"""
if name is not None:
__UpperCamelCase : Optional[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
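# Shape intuition for the two reshapes above (toy sizes, not tied to any model):
# PyTorch conv weights are stored as (out_channels, in_channels, kh, kw) while Flax
# expects (kh, kw, in_channels, out_channels); linear kernels are simply transposed.
#   np.zeros((8, 3, 3, 3)).transpose(2, 3, 1, 0).shape  # -> (3, 3, 3, 8)
#   np.zeros((8, 16)).T.shape                            # -> (16, 8)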
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> Union[str, Any]:
# convert pytorch tensor to numpy
__UpperCamelCase : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
__UpperCamelCase : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__UpperCamelCase : Union[str, Any] = flax_model.params["""params"""]
else:
__UpperCamelCase : List[Any] = flax_model.params
__UpperCamelCase : List[Any] = flatten_dict(__lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__UpperCamelCase : List[str] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(__lowerCAmelCase )
__UpperCamelCase : List[Any] = {}
__UpperCamelCase : int = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__UpperCamelCase : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase : List[str] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__UpperCamelCase : Optional[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase : Optional[int] = rename_key_and_reshape_tensor(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# add model prefix if necessary
__UpperCamelCase : Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__UpperCamelCase : int = jnp.asarray(__lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__UpperCamelCase : Optional[Any] = jnp.asarray(__lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__UpperCamelCase : Optional[int] = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : int ) -> Dict:
import torch
# Load the index
__UpperCamelCase : List[str] = {}
for shard_file in shard_filenames:
# load using msgpack utils
__UpperCamelCase : List[str] = torch.load(__lowerCAmelCase )
__UpperCamelCase : int = {k: v.numpy() for k, v in pt_state_dict.items()}
__UpperCamelCase : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__UpperCamelCase : Optional[Any] = flax_model.params["""params"""]
__UpperCamelCase : int = flatten_dict(__lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
__UpperCamelCase : Dict = flax_model.params
__UpperCamelCase : int = flatten_dict(__lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__UpperCamelCase : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase : Any = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__UpperCamelCase : Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase : str = pt_tuple_key[1:]
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase : Optional[Any] = rename_key_and_reshape_tensor(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# add model prefix if necessary
__UpperCamelCase : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase : Optional[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__UpperCamelCase : List[Any] = jnp.asarray(__lowerCAmelCase )
continue
if "var" in flax_key[-1]:
__UpperCamelCase : Optional[Any] = jnp.asarray(__lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__UpperCamelCase : int = jnp.asarray(__lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__UpperCamelCase : int = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = os.path.abspath(__lowerCAmelCase )
logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__UpperCamelCase : str = getattr(__lowerCAmelCase , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCAmelCase , """rb""" ) as state_f:
try:
__UpperCamelCase : Optional[int] = from_bytes(__lowerCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> List[Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
__UpperCamelCase : Tuple = flatten_dict(jax.tree_util.tree_map(lambda __lowerCAmelCase : x.dtype == jnp.bfloataa , __lowerCAmelCase ) ).values()
if any(__lowerCAmelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
__UpperCamelCase : List[Any] = jax.tree_util.tree_map(
lambda __lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowerCAmelCase )
__UpperCamelCase : Tuple = flatten_dict(__lowerCAmelCase )
__UpperCamelCase : int = pt_model.state_dict()
__UpperCamelCase : Tuple = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
__UpperCamelCase : Any = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__UpperCamelCase : Dict = []
__UpperCamelCase : List[str] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__UpperCamelCase : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
__UpperCamelCase : Optional[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase : List[Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase : Tuple = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCAmelCase ) not in pt_model_dict:
# conv layer
__UpperCamelCase : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
__UpperCamelCase : Dict = jnp.transpose(__lowerCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ) not in pt_model_dict:
# linear layer
__UpperCamelCase : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
__UpperCamelCase : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__UpperCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__UpperCamelCase : List[str] = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
__UpperCamelCase : Tuple = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
__UpperCamelCase : Optional[int] = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__UpperCamelCase : Optional[int] = """.""".join(__lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__UpperCamelCase : Union[str, Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__UpperCamelCase : int = key.split(""".""" )
__UpperCamelCase : str = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__UpperCamelCase : Union[str, Any] = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
__UpperCamelCase : Tuple = key_components[-2] + """_v"""
if name is not None:
__UpperCamelCase : Optional[Any] = key_components[:-3] + [name]
__UpperCamelCase : int = """.""".join(__lowerCAmelCase )
__UpperCamelCase : List[str] = key
if flax_key in special_pt_names:
__UpperCamelCase : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__UpperCamelCase : str = np.asarray(__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , np.ndarray ) else flax_tensor
__UpperCamelCase : Optional[int] = torch.from_numpy(__lowerCAmelCase )
# remove from missing keys
missing_keys.remove(__lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCAmelCase )
pt_model.load_state_dict(__lowerCAmelCase )
# re-transform missing_keys to list
__UpperCamelCase : Union[str, Any] = list(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(__lowerCAmelCase ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"""If your task is similar to the task the model of the checkpoint was trained on, """
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 269
| 0
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
tokenize_kwargs["truncation"] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
return_tensors = self.framework
model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
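# Minimal usage sketch for this pipeline (the model identifier is only an example):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Transformers is great!")
#   # -> nested lists shaped [batch][num_tokens][hidden_size]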
| 175
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
for attribute in key.split(""".""" ):
_UpperCAmelCase = getattr(snake_case , snake_case )
if weight_type is not None:
_UpperCAmelCase = getattr(snake_case , snake_case ).shape
else:
_UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == """group""" , )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
_UpperCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(snake_case )[0].split(""".""" )[-2]
_UpperCAmelCase = mapped_key.replace("""*""" , snake_case )
if "weight_g" in name:
_UpperCAmelCase = """weight_g"""
elif "weight_v" in name:
_UpperCAmelCase = """weight_v"""
elif "bias" in name:
_UpperCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase = """weight"""
else:
_UpperCAmelCase = None
set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case )
continue
if not is_used:
unused_weights.append(snake_case )
logger.warning(f"Unused weights: {unused_weights}" )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = full_name.split("""conv_layers.""" )[-1]
_UpperCAmelCase = name.split(""".""" )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case=None , snake_case=None , snake_case=True ) -> List[Any]:
if config_path is not None:
_UpperCAmelCase = UniSpeechSatConfig.from_pretrained(snake_case )
else:
_UpperCAmelCase = UniSpeechSatConfig()
_UpperCAmelCase = """"""
if is_finetuned:
_UpperCAmelCase = UniSpeechSatForCTC(snake_case )
else:
_UpperCAmelCase = UniSpeechSatForPreTraining(snake_case )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
_UpperCAmelCase = model[0].eval()
recursively_load_weights(snake_case , snake_case )
hf_wavavec.save_pretrained(snake_case )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
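# Example invocation (script filename and paths are illustrative placeholders; pass
# --not_finetuned for a pretraining-only checkpoint without a CTC head):
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/fairseq/dict \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned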
| 175
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
model_type,
generator_name_or_path,
question_encoder_name_or_path,
dest_dir,
config_name_or_path=None,
generator_tokenizer_name_or_path=None,
question_encoder_tokenizer_name_or_path=None,
):
if config_name_or_path is None:
config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
generator_tokenizer_name_or_path = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
rag_config = RagConfig.from_pretrained(config_name_or_path)
gen_config = AutoConfig.from_pretrained(generator_name_or_path)
question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
rag_config.generator = gen_config
rag_config.question_encoder = question_encoder_config
rag_model = model_class.from_pretrained_question_encoder_generator(
question_encoder_name_or_path, generator_name_or_path, config=rag_config)
rag_model.save_pretrained(dest_dir)
# Sanity check.
model_class.from_pretrained(dest_dir)
# Save tokenizers.
gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
args = parser.parse_args()
dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
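# Example invocation (the script filename is assumed; the model identifiers are the
# usual RAG building blocks and are shown only for illustration):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated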
| 52
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mt5"] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 452
| 0
|
"""simple docstring"""
import os
def solution():
"""Returns the total of the name scores for every name in p022_names.txt."""
with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
names = str(file.readlines()[0])
names = names.replace('"', "").split(",")
names.sort()
name_score = 0
total_score = 0
for i, name in enumerate(names):
for letter in name:
name_score += ord(letter) - 64
total_score += (i + 1) * name_score
name_score = 0
return total_score
if __name__ == "__main__":
print(solution())
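# Worked mini example from the problem statement: "COLIN" has an alphabetical value
# of 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the sorted list it
# contributes 938 * 53 = 49714 to the total.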
| 716
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
model_input_names = ["input_features", "attention_mask"]
def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
"""simple docstring"""
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.hop_length = hop_length
self.win_length = win_length
self.frame_signal_scale = frame_signal_scale
self.preemphasis_coeff = preemphasis_coeff
self.mel_floor = mel_floor
self.normalize_means = normalize_means
self.normalize_vars = normalize_vars
self.win_function = win_function
self.return_attention_mask = return_attention_mask
self.sample_size = win_length * sampling_rate // 1000
self.sample_stride = hop_length * sampling_rate // 1000
self.n_fft = optimal_fft_length(self.sample_size)
self.n_freqs = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : np.array ):
"""simple docstring"""
if self.win_function == "hamming_window":
_lowercase : List[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCamelCase_ )
else:
_lowercase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function )
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_lowercase : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=lowerCamelCase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=lowerCamelCase_ , preemphasis=self.preemphasis_coeff , mel_filters=lowerCamelCase_ , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple ):
"""simple docstring"""
if self.normalize_means:
_lowercase : Optional[int] = x[:input_length].mean(axis=0 )
_lowercase : int = np.subtract(lowerCamelCase_ , lowerCamelCase_ )
if self.normalize_vars:
_lowercase : int = x[:input_length].std(axis=0 )
_lowercase : Optional[Any] = np.divide(lowerCamelCase_ , lowerCamelCase_ )
if input_length < x.shape[0]:
_lowercase : Dict = padding_value
# make sure array is in float32
_lowercase : Tuple = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[np.ndarray] , lowerCamelCase_ : Optional[np.ndarray] = None ):
"""simple docstring"""
_lowercase : Dict = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowerCamelCase_ , lowerCamelCase_ , self.padding_value ) for x, n in zip(lowerCamelCase_ , lowerCamelCase_ )]
def __call__( self : Dict , lowerCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : Optional[int] = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : str = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
_lowercase : Tuple = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : str = [raw_speech]
# extract fbank features
_lowercase : Optional[Any] = [self._extract_mfsc_features(lowerCamelCase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowercase : Optional[int] = BatchFeature({'input_features': features} )
_lowercase : Tuple = self.pad(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
# make sure list is in array format
_lowercase : Dict = padded_inputs.get('input_features' )
if isinstance(input_features[0] , lowerCamelCase_ ):
_lowercase : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_features]
_lowercase : List[Any] = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_lowercase : Union[str, Any] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowercase : int = (
np.array(lowerCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowercase : List[Any] = self.normalize(
padded_inputs['input_features'] , attention_mask=lowerCamelCase_ )
if return_tensors is not None:
_lowercase : Union[str, Any] = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
| 283
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
_a : Union[List[PIL.Image.Image], np.ndarray]
_a : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 200
|
"""simple docstring"""
def _lowerCamelCase ( lowerCamelCase__ : Optional[Any] ):
lowercase__ : List[str] = len(lowerCamelCase__ )
lowercase__ : Optional[int] = sum(lowerCamelCase__ )
lowercase__ : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowercase__ : int = True
for i in range(1 , s + 1 ):
lowercase__ : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowercase__ : Optional[Any] = dp[i][j - 1]
if arr[i - 1] <= j:
lowercase__ : int = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowercase__ : List[Any] = s - 2 * j
break
return diff
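

if __name__ == "__main__":
    # A minimal usage sketch: for [1, 6, 11, 5] the two subsets {1, 5, 6} and {11}
    # differ by 1, so the minimum partition difference is expected to be 1.
    print(find_min([1, 6, 11, 5]))  # 1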
| 200
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "SpeechT5FeatureExtractor"
snake_case_ = "SpeechT5Tokenizer"
def __init__( self : Optional[int] , __snake_case : Any , __snake_case : List[str] )-> List[Any]:
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
def __call__( self : Union[str, Any] , *__snake_case : Tuple , **__snake_case : Any )-> str:
snake_case = kwargs.pop("""audio""" , UpperCamelCase_ )
snake_case = kwargs.pop("""text""" , UpperCamelCase_ )
snake_case = kwargs.pop("""text_target""" , UpperCamelCase_ )
snake_case = kwargs.pop("""audio_target""" , UpperCamelCase_ )
snake_case = kwargs.pop("""sampling_rate""" , UpperCamelCase_ )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
snake_case = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
elif text is not None:
snake_case = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
else:
snake_case = None
if audio_target is not None:
snake_case = self.feature_extractor(audio_target=UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
snake_case = targets['input_values']
elif text_target is not None:
snake_case = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
snake_case = targets['input_ids']
else:
snake_case = None
if inputs is None:
return targets
if targets is not None:
snake_case = labels
snake_case = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
snake_case = decoder_attention_mask
return inputs
def lowerCAmelCase ( self : List[str] , *__snake_case : str , **__snake_case : Any )-> List[Any]:
snake_case = kwargs.pop("""input_values""" , UpperCamelCase_ )
snake_case = kwargs.pop("""input_ids""" , UpperCamelCase_ )
snake_case = kwargs.pop("""labels""" , UpperCamelCase_ )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
snake_case = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
elif input_ids is not None:
snake_case = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
else:
snake_case = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCamelCase_ , UpperCamelCase_ ) and "input_ids" in labels[0]):
snake_case = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
snake_case = targets['input_ids']
else:
snake_case = self.feature_extractor.feature_size
snake_case = self.feature_extractor.num_mel_bins
snake_case = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
snake_case = feature_size_hack
snake_case = targets['input_values']
else:
snake_case = None
if inputs is None:
return targets
if targets is not None:
snake_case = labels
snake_case = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
snake_case = decoder_attention_mask
return inputs
def lowerCAmelCase ( self : List[str] , *__snake_case : List[str] , **__snake_case : Union[str, Any] )-> Optional[Any]:
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase ( self : int , *__snake_case : Tuple , **__snake_case : int )-> str:
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
| 704
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 517
| 0
|
"""simple docstring"""
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = (0, 0)
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int = 0
def __eq__( self : Dict , a_ : Dict )-> Any:
"""simple docstring"""
return self.position == cell.position
def a ( self : Dict )-> List[Any]:
"""simple docstring"""
print(self.position )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , a_ : Dict=(5, 5) )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = np.zeros(a_ )
UpperCAmelCase_ : List[Any] = world_size[0]
UpperCAmelCase_ : List[str] = world_size[1]
def a ( self : Any )-> Union[str, Any]:
"""simple docstring"""
print(self.w )
def a ( self : Union[str, Any] , a_ : str )-> int:
"""simple docstring"""
UpperCAmelCase_ : int = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
UpperCAmelCase_ : Tuple = cell.position[0]
UpperCAmelCase_ : Union[str, Any] = cell.position[1]
UpperCAmelCase_ : List[Any] = []
for n in neughbour_cord:
UpperCAmelCase_ : Optional[Any] = current_x + n[0]
UpperCAmelCase_ : Any = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
UpperCAmelCase_ : Dict = Cell()
UpperCAmelCase_ : Optional[Any] = (x, y)
UpperCAmelCase_ : Optional[Any] = cell
neighbours.append(a_ )
return neighbours
def A_ ( lowercase , lowercase , lowercase ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Union[str, Any] = []
_open.append(lowercase )
while _open:
UpperCAmelCase_ : str = np.argmin([n.f for n in _open] )
UpperCAmelCase_ : Tuple = _open[min_f]
_closed.append(_open.pop(lowercase ) )
if current == goal:
break
for n in world.get_neigbours(lowercase ):
for c in _closed:
if c == n:
continue
UpperCAmelCase_ : List[Any] = current.g + 1
UpperCAmelCase_ ,UpperCAmelCase_ : Dict = n.position
UpperCAmelCase_ ,UpperCAmelCase_ : List[Any] = goal.position
UpperCAmelCase_ : List[Any] = (ya - ya) ** 2 + (xa - xa) ** 2
UpperCAmelCase_ : Optional[int] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(lowercase )
UpperCAmelCase_ : Dict = []
while current.parent is not None:
path.append(current.position )
UpperCAmelCase_ : List[str] = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
lowercase_ = Gridworld()
# Start position and goal
lowercase_ = Cell()
lowercase_ = (0, 0)
lowercase_ = Cell()
lowercase_ = (4, 4)
print(f"""path from {start.position} to {goal.position}""")
lowercase_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowercase_ = 1
print(world.w)
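    # Sanity check: with the squared-distance heuristic above, the search on an
    # empty 5x5 grid is expected to walk the main diagonal from start to goal.
    assert path == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]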
| 470
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase_ (lowerCamelCase_ ):
"""simple docstring"""
UpperCamelCase_ : str = ["""image_processor""", """tokenizer"""]
UpperCamelCase_ : List[str] = """OwlViTImageProcessor"""
UpperCamelCase_ : str = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Any , a_ : Any=None , a_ : str=None , **a_ : List[str] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , a_ , )
UpperCAmelCase_ : List[Any] = kwargs.pop("""feature_extractor""" )
UpperCAmelCase_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a_ , a_ )
def __call__( self : Any , a_ : Optional[int]=None , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="max_length" , a_ : List[Any]="np" , **a_ : int )-> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )):
UpperCAmelCase_ : List[str] = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )]
elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ):
UpperCAmelCase_ : Optional[int] = []
# Maximum number of queries across batch
UpperCAmelCase_ : Union[str, Any] = max([len(a_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(a_ ) != max_num_queries:
UpperCAmelCase_ : str = t + [""" """] * (max_num_queries - len(a_ ))
UpperCAmelCase_ : Optional[int] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )
encodings.append(a_ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCAmelCase_ : List[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase_ : Tuple = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase_ : Tuple = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase_ : Tuple = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase_ : str = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCAmelCase_ : str = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase_ : Union[str, Any] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase_ : Optional[int] = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCAmelCase_ : Union[str, Any] = BatchEncoding()
UpperCAmelCase_ : int = input_ids
UpperCAmelCase_ : List[str] = attention_mask
if query_images is not None:
UpperCAmelCase_ : Optional[int] = BatchEncoding()
UpperCAmelCase_ : Any = self.image_processor(
a_ , return_tensors=a_ , **a_ ).pixel_values
UpperCAmelCase_ : Optional[Any] = query_pixel_values
if images is not None:
UpperCAmelCase_ : str = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
UpperCAmelCase_ : List[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase_ : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def a ( self : Any , *a_ : Optional[Any] , **a_ : List[str] )-> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process(*a_ , **a_ )
def a ( self : Tuple , *a_ : List[str] , **a_ : Dict )-> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*a_ , **a_ )
def a ( self : Optional[int] , *a_ : Tuple , **a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*a_ , **a_ )
def a ( self : str , *a_ : Optional[int] , **a_ : str )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def a ( self : str , *a_ : List[Any] , **a_ : List[str] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def a ( self : Tuple )-> int:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , a_ , )
return self.image_processor_class
@property
def a ( self : Optional[Any] )-> int:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , a_ , )
return self.image_processor
| 470
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = "openai-gpt"
UpperCAmelCase__ : List[str] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _a=4_0_4_7_8 , _a=5_1_2 , _a=7_6_8 , _a=1_2 , _a=1_2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1e-5 , _a=0.02 , _a="cls_index" , _a=True , _a=None , _a=True , _a=0.1 , **_a , ) -> List[str]:
_a : int = vocab_size
_a : List[Any] = n_positions
_a : str = n_embd
_a : Optional[int] = n_layer
_a : Tuple = n_head
_a : List[str] = afn
_a : Union[str, Any] = resid_pdrop
_a : Tuple = embd_pdrop
_a : Optional[Any] = attn_pdrop
_a : Union[str, Any] = layer_norm_epsilon
_a : int = initializer_range
_a : int = summary_type
_a : Any = summary_use_proj
_a : Union[str, Any] = summary_activation
_a : Optional[Any] = summary_first_dropout
_a : Tuple = summary_proj_to_labels
super().__init__(**_a )
| 709
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = "gptj"
UpperCAmelCase__ : Union[str, Any] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _a=5_0_4_0_0 , _a=2_0_4_8 , _a=4_0_9_6 , _a=2_8 , _a=1_6 , _a=6_4 , _a=None , _a="gelu_new" , _a=0.0 , _a=0.0 , _a=0.0 , _a=1e-5 , _a=0.02 , _a=True , _a=5_0_2_5_6 , _a=5_0_2_5_6 , _a=False , **_a , ) -> str:
_a : Any = vocab_size
_a : str = n_positions
_a : Union[str, Any] = n_embd
_a : Tuple = n_layer
_a : int = n_head
_a : List[str] = n_inner
_a : List[str] = rotary_dim
_a : Optional[int] = activation_function
_a : List[str] = resid_pdrop
_a : List[str] = embd_pdrop
_a : Optional[Any] = attn_pdrop
_a : Union[str, Any] = layer_norm_epsilon
_a : Optional[Any] = initializer_range
_a : Tuple = use_cache
_a : Union[str, Any] = bos_token_id
_a : Tuple = eos_token_id
super().__init__(
bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a = "default" , _a = None , _a = False , ) -> List[str]:
super().__init__(_a , task=_a , patching_specs=_a , use_past=_a )
if not getattr(self._config , '''pad_token_id''' , _a ):
# TODO: how to do that better?
_a : Optional[int] = 0
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_a : List[str] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_a , direction='''inputs''' )
_a : Optional[int] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_a : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowercase ( self ) -> int:
return self._config.n_layer
@property
def __lowercase ( self ) -> int:
return self._config.n_head
def __lowercase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Mapping[str, Any]:
_a : str = super(_a , self ).generate_dummy_inputs(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
# We need to order the input in the way they appears in the forward()
_a : List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a : Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a : Dict = seqlen + 2
_a : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a : Union[str, Any] = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers )
]
_a : Any = common_inputs['''attention_mask''']
if self.use_past:
_a : str = ordered_inputs['''attention_mask'''].dtype
_a : Optional[int] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_a , _a , dtype=_a )] , dim=1 )
return ordered_inputs
@property
def __lowercase ( self ) -> int:
return 1_3
| 578
| 0
|
"""Project Euler problem 493: expected number of distinct colours drawn from the urn."""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls, to nine decimal places."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
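    # With the default 20 picks the expected number of distinct colours works out to
    # roughly 6.8187 (the commonly quoted Project Euler 493 answer is 6.818741802).
    assert abs(float(solution(20)) - 6.8187) < 1e-3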
| 527
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 477
| 0
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@staticmethod
def _lowerCAmelCase ( *__a : str , **__a : Optional[Any] ) ->List[Any]:
pass
@is_pipeline_test
@require_vision
class SCREAMING_SNAKE_CASE_ (unittest.TestCase ):
'''simple docstring'''
@require_torch
def _lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
lowerCamelCase_ : Union[str, Any] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
lowerCamelCase_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase_ : Optional[Any] = image_classifier(__a , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__a ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
lowerCamelCase_ : List[str] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
] , )
@require_tf
def _lowerCAmelCase ( self : List[str] ) ->Optional[int]:
lowerCamelCase_ : List[str] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
lowerCamelCase_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase_ : int = image_classifier(__a , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(__a ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
lowerCamelCase_ : List[Any] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
[
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
{"""score""": 0.333, """label""": ANY(__a )},
],
] , )
@slow
@require_torch
def _lowerCAmelCase ( self : Dict ) ->Optional[Any]:
lowerCamelCase_ : int = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase_ : List[str] = image_classifier(__a , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__a ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
lowerCamelCase_ : int = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def _lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
lowerCamelCase_ : Union[str, Any] = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase_ : Optional[int] = image_classifier(__a , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__a ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
lowerCamelCase_ : Tuple = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
| 171
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = "detr"
_a = ["past_key_values"]
_a = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : int , __a : Dict=True , __a : Union[str, Any]=None , __a : Union[str, Any]=3 , __a : Dict=100 , __a : str=6 , __a : List[str]=2_048 , __a : Any=8 , __a : List[str]=6 , __a : List[str]=2_048 , __a : str=8 , __a : Tuple=0.0 , __a : Dict=0.0 , __a : Optional[int]=True , __a : Union[str, Any]="relu" , __a : Optional[int]=256 , __a : Tuple=0.1 , __a : List[str]=0.0 , __a : Tuple=0.0 , __a : Tuple=0.02 , __a : Optional[Any]=1.0 , __a : List[str]=False , __a : Optional[int]="sine" , __a : Optional[Any]="resnet50" , __a : Optional[int]=True , __a : Dict=False , __a : Union[str, Any]=1 , __a : Optional[Any]=5 , __a : List[Any]=2 , __a : Any=1 , __a : int=1 , __a : List[str]=5 , __a : int=2 , __a : Any=0.1 , **__a : List[Any] , ) ->str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__a , __a ):
lowerCamelCase_ : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ : List[str] = config_class.from_dict(__a )
# set timm attributes to None
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = None, None, None
lowerCamelCase_ : Dict = use_timm_backbone
lowerCamelCase_ : Optional[Any] = backbone_config
lowerCamelCase_ : List[Any] = num_channels
lowerCamelCase_ : int = num_queries
lowerCamelCase_ : int = d_model
lowerCamelCase_ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = encoder_layers
lowerCamelCase_ : List[str] = encoder_attention_heads
lowerCamelCase_ : Any = decoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = decoder_layers
lowerCamelCase_ : List[Any] = decoder_attention_heads
lowerCamelCase_ : Optional[Any] = dropout
lowerCamelCase_ : List[str] = attention_dropout
lowerCamelCase_ : List[Any] = activation_dropout
lowerCamelCase_ : Union[str, Any] = activation_function
lowerCamelCase_ : int = init_std
lowerCamelCase_ : Optional[Any] = init_xavier_std
lowerCamelCase_ : Any = encoder_layerdrop
lowerCamelCase_ : List[Any] = decoder_layerdrop
lowerCamelCase_ : Union[str, Any] = encoder_layers
lowerCamelCase_ : Any = auxiliary_loss
lowerCamelCase_ : Tuple = position_embedding_type
lowerCamelCase_ : Optional[int] = backbone
lowerCamelCase_ : Union[str, Any] = use_pretrained_backbone
lowerCamelCase_ : int = dilation
# Hungarian matcher
lowerCamelCase_ : str = class_cost
lowerCamelCase_ : Union[str, Any] = bbox_cost
lowerCamelCase_ : Tuple = giou_cost
# Loss coefficients
lowerCamelCase_ : Optional[int] = mask_loss_coefficient
lowerCamelCase_ : int = dice_loss_coefficient
lowerCamelCase_ : str = bbox_loss_coefficient
lowerCamelCase_ : List[str] = giou_loss_coefficient
lowerCamelCase_ : int = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a )
@property
def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self : List[str] ) ->int:
return self.d_model
@classmethod
def _lowerCAmelCase ( cls : Tuple , __a : PretrainedConfig , **__a : Dict ) ->Optional[int]:
return cls(backbone_config=__a , **__a )
def _lowerCAmelCase ( self : List[Any] ) ->Dict[str, any]:
lowerCamelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCamelCase_ : List[str] = self.backbone_config.to_dict()
lowerCamelCase_ : Union[str, Any] = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = version.parse("1.11" )
@property
def _lowerCAmelCase ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _lowerCAmelCase ( self : Optional[Any] ) ->float:
return 1e-5
@property
def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
return 12
| 171
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
SCREAMING_SNAKE_CASE_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase )
processor.save_pretrained(self.tmpdirname )
def _A ( self: Any , **_lowerCamelCase: Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).tokenizer
def _A ( self: int , **_lowerCamelCase: str ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def _A ( self: Tuple , **_lowerCamelCase: Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).qformer_tokenizer
def _A ( self: str ):
shutil.rmtree(self.tmpdirname )
def _A ( self: Dict ):
SCREAMING_SNAKE_CASE_ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _A ( self: Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
self.assertIsInstance(processor.qformer_tokenizer , _lowercase )
def _A ( self: str ):
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(_lowercase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = processor(images=_lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = processor(text=_lowercase )
SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase , return_token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def _A ( self: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(_lowercase )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 234
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
SCREAMING_SNAKE_CASE :Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase( SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=1_6 , SCREAMING_SNAKE_CASE_ = 1_0 , SCREAMING_SNAKE_CASE_ = 2 )-> Optional[Any]:
"""simple docstring"""
def get_dataset(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase_ = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(SCREAMING_SNAKE_CASE_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCamelCase_ = get_dataset(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = get_dataset(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
UpperCamelCase_ = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None )-> Any:
"""simple docstring"""
UpperCamelCase_ = []
for epoch in range(SCREAMING_SNAKE_CASE_ ):
# Train quickly
model.train()
for batch in dataloader:
UpperCamelCase_ , UpperCamelCase_ = batch
UpperCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.backward(SCREAMING_SNAKE_CASE_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __magic_name__ ( nn.Module ):
def __init__( self )-> List[Any]:
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.randn(1 ) )
UpperCamelCase_ = nn.Parameter(torch.randn(1 ) )
def UpperCAmelCase_ ( self , _lowercase )-> str:
return x * self.a + self.b
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
UpperCamelCase_ = ProjectConfiguration(total_limit=1 , project_dir=_lowercase , automatic_checkpoint_naming=_lowercase )
# Train baseline
UpperCamelCase_ = Accelerator(project_config=_lowercase )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCAmelCase_ ( self )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
# Train baseline
UpperCamelCase_ = Accelerator()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
UpperCamelCase_ = os.path.join(_lowercase , "initial" )
accelerator.save_state(_lowercase )
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
UpperCamelCase_ = train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
UpperCamelCase_ = Accelerator()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.load_state(_lowercase )
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
UpperCamelCase_ = train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
# Save everything
UpperCamelCase_ = os.path.join(_lowercase , "checkpoint" )
accelerator.save_state(_lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(_lowercase )
test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
UpperCamelCase_ = ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
# Train baseline
UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
UpperCamelCase_ = train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
UpperCamelCase_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowercase )
UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.load_state(os.path.join(_lowercase , "checkpoints" , "checkpoint_0" ) )
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
UpperCamelCase_ = train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowercase , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
UpperCamelCase_ = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = torch.tensor([1, 2, 3] )
UpperCamelCase_ = torch.tensor([2, 3, 4] )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(net.parameters() )
UpperCamelCase_ = Accelerator()
with self.assertRaises(_lowercase ) as ve:
accelerator.register_for_checkpointing(_lowercase , _lowercase , _lowercase , _lowercase )
UpperCamelCase_ = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def UpperCAmelCase_ ( self )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCamelCase_ = torch.optim.lr_scheduler.StepLR(_lowercase , step_size=1 , gamma=0.99 )
UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
UpperCamelCase_ = ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
# Train baseline
UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
UpperCamelCase_ = scheduler.state_dict()
train(3 , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
self.assertNotEqual(_lowercase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowercase , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(_lowercase , scheduler.state_dict() )
def UpperCAmelCase_ ( self )-> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = ProjectConfiguration(automatic_checkpoint_naming=_lowercase , total_limit=2 )
# Train baseline
UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
UpperCamelCase_ = accelerator.prepare(_lowercase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_lowercase , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowercase , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Any = """/tmp/accelerate/state_checkpointing"""
SCREAMING_SNAKE_CASE :Any = DummyModel()
SCREAMING_SNAKE_CASE :Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
SCREAMING_SNAKE_CASE :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :str = dummy_dataloaders()
SCREAMING_SNAKE_CASE :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
SCREAMING_SNAKE_CASE :int = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Tuple = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[str] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE :Optional[Any] = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
SCREAMING_SNAKE_CASE :List[str] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE :Optional[int] = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE :str = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 628
| 0
|
# Anchor-day tables for the Doomsday algorithm (index = month - 1)
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given date using the Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
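    # A couple of spot checks (dates verified against a calendar):
    # 2021-01-01 was a Friday and 2000-02-29 was a Tuesday.
    assert get_week_day(2021, 1, 1) == "Friday"
    assert get_week_day(2000, 2, 29) == "Tuesday"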
| 226
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoised count of unique prime factors."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """True if every element of the list is equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4):
    """Return the first of n consecutive integers that each have n unique prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
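# --- Added usage sketch (not part of the original file) ----------------------
# Illustrative checks of the consecutive-distinct-prime-factor search above
# (Project Euler 47 style); the expected values assume the corrected solution().
def _demo_solution() -> None:
    assert solution(2) == 14  # 14 = 2*7 and 15 = 3*5 each have two distinct prime factors
    assert solution(3) == 644  # 644, 645 and 646 each have three distinct prime factors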
| 226
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swin'] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
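# --- Added usage sketch (not part of the original module) --------------------
# The _LazyModule above defers the heavy torch/TF imports until an attribute is
# first accessed; a hedged illustration, assuming the usual transformers layout:
def _demo_lazy_swin_import() -> None:
    from transformers import SwinConfig, SwinModel  # resolved lazily on first access
    model = SwinModel(SwinConfig())
    assert model.config.model_type == "swin"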
| 86
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
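# --- Added usage sketch (not part of the original file) ----------------------
# A small worked example of the minimax routine above on a 4-leaf tree where the
# maximiser moves first; the scores and expected value are editorial additions.
def _demo_minimax() -> None:
    scores = [3, 5, 2, 9]
    height = math.log(len(scores), 2)  # complete binary tree of height 2
    assert minimax(0, 0, True, scores, height) == 3  # max(min(3, 5), min(2, 9))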
| 317
| 0
|
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"{solution() = }")
| 681
|
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label)."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
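# --- Added usage note (not part of the original script) ----------------------
# A typical invocation of this fine-tuning script; the file name run_openai_gpt.py
# and the dataset paths are placeholders, while the flags come from the argparse
# definitions above:
#
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset /path/to/rocstories/train.csv \
#       --eval_dataset /path/to/rocstories/val.csv \
#       --output_dir ./rocstories_out --train_batch_size 8 --num_train_epochs 3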
| 681
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ ( UpperCamelCase__ ):
_lowerCAmelCase = ["""input_features""", """is_longer"""]
def __init__( self : List[str] , lowerCamelCase__ : Optional[Any]=6_4 , lowerCamelCase__ : Union[str, Any]=4_8_0_0_0 , lowerCamelCase__ : Any=4_8_0 , lowerCamelCase__ : int=1_0 , lowerCamelCase__ : Any=1_0_2_4 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : List[str] = 0 , lowerCamelCase__ : Optional[Any] = 1_4_0_0_0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : List[Any] = "fusion" , lowerCamelCase__ : int = "repeatpad" , **lowerCamelCase__ : Optional[Any] , ):
super().__init__(
feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
lowerCAmelCase : Optional[Any] = top_db
lowerCAmelCase : List[Any] = truncation
lowerCAmelCase : Any = padding
lowerCAmelCase : Optional[int] = fft_window_size
lowerCAmelCase : int = (fft_window_size >> 1) + 1
lowerCAmelCase : Any = hop_length
lowerCAmelCase : int = max_length_s
lowerCAmelCase : Union[str, Any] = max_length_s * sampling_rate
lowerCAmelCase : Optional[Any] = sampling_rate
lowerCAmelCase : Optional[int] = frequency_min
lowerCAmelCase : List[str] = frequency_max
lowerCAmelCase : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm=lowerCamelCase__ , mel_scale='''htk''' , )
lowerCAmelCase : List[str] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def _A ( self : Any ):
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
lowerCAmelCase : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _A ( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] = None ):
lowerCAmelCase : Dict = spectrogram(
lowerCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def _A ( self : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] ):
lowerCAmelCase : str = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase : Any = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase : Dict = np.random.choice(ranges[0] )
lowerCAmelCase : Tuple = np.random.choice(ranges[1] )
lowerCAmelCase : Union[str, Any] = np.random.choice(ranges[2] )
lowerCAmelCase : List[str] = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase : Tuple = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase : str = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase : Any = torch.tensor(mel[None, None, :] )
lowerCAmelCase : Tuple = torch.nn.functional.interpolate(
lowerCamelCase__ , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy()
lowerCAmelCase : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _A ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Any ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase : Optional[int] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase : List[Any] = len(lowerCamelCase__ ) - max_length
lowerCAmelCase : Optional[Any] = np.random.randint(0 , overflow + 1 )
lowerCAmelCase : str = waveform[idx : idx + max_length]
lowerCAmelCase : Any = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase : Union[str, Any] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
lowerCAmelCase : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase : str = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCAmelCase : Optional[int] = False
else:
lowerCAmelCase : Dict = self._random_mel_fusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase : List[str] = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase : Any = int(max_length / len(lowerCamelCase__ ) )
lowerCAmelCase : List[Any] = np.stack(np.tile(lowerCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase : Optional[Any] = int(max_length / len(lowerCamelCase__ ) )
lowerCAmelCase : str = np.stack(np.tile(lowerCamelCase__ , lowerCamelCase__ ) )
lowerCAmelCase : int = np.pad(lowerCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
lowerCAmelCase : Dict = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
lowerCAmelCase : str = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCAmelCase : Any = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int = None , lowerCamelCase__ : Tuple = None , lowerCamelCase__ : List[Any] = None , lowerCamelCase__ : int = None , lowerCamelCase__ : List[str] = None , **lowerCamelCase__ : Any , ):
lowerCAmelCase : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCAmelCase : str = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase : int = is_batched_numpy or (
isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase : int = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ):
lowerCAmelCase : Union[str, Any] = np.asarray(lowerCamelCase__ , dtype=np.floataa )
elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase : Optional[int] = [np.asarray(lowerCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase : Tuple = [
self._get_input_mel(lowerCamelCase__ , max_length if max_length else self.nb_max_samples , lowerCamelCase__ , lowerCamelCase__ )
for waveform in raw_speech
]
lowerCAmelCase : List[str] = []
lowerCAmelCase : int = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase__ )
is_longer.append(lowerCamelCase__ )
if truncation == "fusion" and sum(lowerCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase : Optional[int] = np.random.randint(0 , len(lowerCamelCase__ ) )
lowerCAmelCase : Any = True
if isinstance(input_mel[0] , lowerCamelCase__ ):
lowerCAmelCase : List[Any] = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowerCAmelCase : List[str] = BatchFeature(lowerCamelCase__ )
if return_tensors is not None:
lowerCAmelCase : str = input_features.convert_to_tensors(lowerCamelCase__ )
return input_features
| 348
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        """Initialise the vector from an optional collection of components."""
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self ) -> int:
'''simple docstring'''
return len(self.__components )
def __str__( self ) -> str:
'''simple docstring'''
return "(" + ",".join(map(A , self.__components ) ) + ")"
def __add__( self , A ) -> Vector:
'''simple docstring'''
a = len(self )
if size == len(A ):
a = [self.__components[i] + other.component(A ) for i in range(A )]
return Vector(A )
else:
raise Exception("must have the same size" )
def __sub__( self , A ) -> Vector:
'''simple docstring'''
a = len(self )
if size == len(A ):
a = [self.__components[i] - other.component(A ) for i in range(A )]
return Vector(A )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self , A ) -> Vector:
'''simple docstring'''
...
@overload
def __mul__( self , A ) -> float:
'''simple docstring'''
...
def __mul__( self , A ) -> float | Vector:
'''simple docstring'''
if isinstance(A , (float, int) ):
a = [c * other for c in self.__components]
return Vector(A )
elif isinstance(A , A ) and len(self ) == len(A ):
a = len(self )
a = [self.__components[i] * other.component(A ) for i in range(A )]
return sum(A )
else: # error case
raise Exception("invalid operand!" )
def lowerCAmelCase_ ( self ) -> Vector:
'''simple docstring'''
return Vector(self.__components )
def lowerCAmelCase_ ( self , A ) -> float:
'''simple docstring'''
if isinstance(A , A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("index out of range" )
def lowerCAmelCase_ ( self , A , A ) -> None:
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
a = value
def lowerCAmelCase_ ( self ) -> float:
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("Vector is empty" )
a = [c**2 for c in self.__components]
return math.sqrt(sum(A ) )
def lowerCAmelCase_ ( self , A , A = False ) -> float:
'''simple docstring'''
a = self * other
a = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of size 'dimension'
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> Vector:
assert isinstance(__UpperCamelCase , __UpperCamelCase) and (isinstance(__UpperCamelCase , __UpperCamelCase))
a = [0] * dimension
a = 1
return Vector(__UpperCamelCase)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> Vector:
assert (
isinstance(__UpperCamelCase , __UpperCamelCase)
and isinstance(__UpperCamelCase , __UpperCamelCase)
and (isinstance(__UpperCamelCase , (int, float)))
)
return x * scalar + y
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> Vector:
random.seed(__UpperCamelCase)
a = [random.randint(__UpperCamelCase , __UpperCamelCase) for _ in range(__UpperCamelCase)]
return Vector(__UpperCamelCase)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """Simple constructor: store the matrix rows and its width/height."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ) -> str:
'''simple docstring'''
a = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
a = []
for i in range(self.__height ):
a = [
self.__matrix[i][j] + other.component(A , A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self , A ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
a = []
for i in range(self.__height ):
a = [
self.__matrix[i][j] - other.component(A , A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self , A ) -> Matrix:
'''simple docstring'''
...
@overload
def __mul__( self , A ) -> Vector:
'''simple docstring'''
...
def __mul__( self , A ) -> Vector | Matrix:
'''simple docstring'''
if isinstance(A , A ): # matrix-vector
if len(A ) == self.__width:
a = zero_vector(self.__height )
for i in range(self.__height ):
a = [
self.__matrix[i][j] * other.component(A )
for j in range(self.__width )
]
ans.change_component(A , sum(A ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(A , (int, float) ): # matrix-scalar
a = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A , self.__width , self.__height )
return None
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return self.__height
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return self.__width
def lowerCAmelCase_ ( self , A , A ) -> float:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def lowerCAmelCase_ ( self , A , A , A ) -> None:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
a = value
else:
raise Exception("change_component: indices out of bounds" )
def lowerCAmelCase_ ( self , A , A ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("Matrix is not square" )
a = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A ) ):
a = minor[i][:y] + minor[i][y + 1 :]
return Matrix(A , self.__width - 1 , self.__height - 1 ).determinant()
def lowerCAmelCase_ ( self , A , A ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A , A )
else:
raise Exception("Indices out of bounds" )
def lowerCAmelCase_ ( self ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
a = [
self.__matrix[0][y] * self.cofactor(0 , A ) for y in range(self.__width )
]
return sum(A )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> Matrix:
a = [[0] * n for _ in range(__UpperCamelCase)]
return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> Matrix:
random.seed(__UpperCamelCase)
a = [
[random.randint(__UpperCamelCase , __UpperCamelCase) for _ in range(__UpperCamelCase)] for _ in range(__UpperCamelCase)
]
return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
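# --- Added usage sketch (not part of the original file) ----------------------
# A minimal illustration of constructing the Vector class above; it only relies
# on the corrected constructor, __len__ and __str__.
def _demo_vector() -> None:
    v = Vector([1.0, 2.0, 2.0])
    assert len(v) == 3
    assert str(v) == "(1.0,2.0,2.0)"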
| 515
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE ={
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__SCREAMING_SNAKE_CASE ={
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
__SCREAMING_SNAKE_CASE ={
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class __magic_name__ ( __UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : List[Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE__ : Any = DistilBertTokenizer
def __init__( self: int , _lowerCamelCase: Union[str, Any]=None , _lowerCamelCase: Optional[Any]=None , _lowerCamelCase: int=True , _lowerCamelCase: Any="[UNK]" , _lowerCamelCase: List[Any]="[SEP]" , _lowerCamelCase: Optional[Any]="[PAD]" , _lowerCamelCase: List[Any]="[CLS]" , _lowerCamelCase: List[Any]="[MASK]" , _lowerCamelCase: List[Any]=True , _lowerCamelCase: Any=None , **_lowerCamelCase: Optional[int] , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
SCREAMING_SNAKE_CASE_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = strip_accents
SCREAMING_SNAKE_CASE_ = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ = normalizer_class(**UpperCamelCase_ )
SCREAMING_SNAKE_CASE_ = do_lower_case
def _A ( self: int , _lowerCamelCase: Any , _lowerCamelCase: List[str]=None ):
SCREAMING_SNAKE_CASE_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _A ( self: Optional[Any] , _lowerCamelCase: List[int] , _lowerCamelCase: Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A ( self: Tuple , _lowerCamelCase: str , _lowerCamelCase: Optional[str] = None ):
SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 704
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __magic_name__ ( __UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = "Wav2Vec2FeatureExtractor"
SCREAMING_SNAKE_CASE__ : List[Any] = "AutoTokenizer"
def __init__( self: Tuple , _lowerCamelCase: str , _lowerCamelCase: Optional[Any] ):
super().__init__(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extractor
SCREAMING_SNAKE_CASE_ = False
@classmethod
def _A ( cls: List[Any] , _lowerCamelCase: Tuple , **_lowerCamelCase: List[str] ):
try:
return super().from_pretrained(_lowerCamelCase , **_lowerCamelCase )
except OSError:
warnings.warn(
f"Loading a tokenizer inside {cls.__name__} from a config that does not"
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , _lowerCamelCase , )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
return cls(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
def __call__( self: Union[str, Any] , *_lowerCamelCase: List[Any] , **_lowerCamelCase: str ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
else:
SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE_ = args[0]
SCREAMING_SNAKE_CASE_ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE_ = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
return inputs
def _A ( self: Optional[int] , *_lowerCamelCase: str , **_lowerCamelCase: List[Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE_ = args[0]
SCREAMING_SNAKE_CASE_ = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if labels is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
return input_features
def _A ( self: str , *_lowerCamelCase: Dict , **_lowerCamelCase: Dict ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def _A ( self: Optional[int] , *_lowerCamelCase: Optional[int] , **_lowerCamelCase: Tuple ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def _A ( self: Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.tokenizer
yield
SCREAMING_SNAKE_CASE_ = self.feature_extractor
SCREAMING_SNAKE_CASE_ = False
| 89
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = 0
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowercase , 'vocab.txt' ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type='bert' , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowercase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowercase , 'merges.txt' ) )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type='gpt2' , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowercase , 'vocab.txt' ) )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type='bert' )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowercase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowercase , 'merges.txt' ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type='gpt2' )
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : List[Any] = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
A_ : Optional[Any] = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = TOKENIZER_MAPPING.values()
A_ : Optional[int] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , lowercase )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=lowercase )
A_ : Optional[int] = 'Hello, world. How are you?'
A_ : Optional[Any] = tokenizer.tokenize(lowercase )
self.assertEqual('[UNK]' , tokens[0] )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=lowercase )
A_ : int = tokenizer.tokenize(lowercase )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = get_tokenizer_config('bert-base-cased' )
A_ : Any = config.pop('_commit_hash' , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : int = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : str = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Any = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[int] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase ):
A_ : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Tuple = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase )
A_ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
A_ : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : str = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = False
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = NewTokenizer
lowerCamelCase_ = False
try:
AutoConfig.register('custom' , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
A_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : Optional[int] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
A_ : Tuple = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is enabled, we load from the Hub
A_ : str = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Tuple = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
A_ : List[str] = AutoTokenizer.from_pretrained('bert-base' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
A_ : str = AutoTokenizer.from_pretrained(lowercase , revision='aaaaaa' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
A_ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
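# Illustrative sketch: a minimal example of the registration API exercised by the
# tests above. The model type "my-custom-model" and the class names are hypothetical
# stand-ins; the register calls themselves are the real AutoConfig/AutoTokenizer API.
def _register_custom_tokenizer_example():
    from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig

    class MyCustomConfig(PretrainedConfig):
        model_type = "my-custom-model"

    class MyCustomTokenizer(BertTokenizer):
        pass

    AutoConfig.register("my-custom-model", MyCustomConfig)
    AutoTokenizer.register(MyCustomConfig, slow_tokenizer_class=MyCustomTokenizer)
    # From here on, AutoTokenizer.from_pretrained on a checkpoint whose config has
    # model_type "my-custom-model" resolves to MyCustomTokenizer.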
| 558
|
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version string in a single file, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files (examples are skipped for patch releases)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump the version for a release (minor bump by default, micro bump for a patch)."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Switch the version back to a dev version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
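# Illustrative sketch: a self-contained demonstration of the regex substitution the
# release script performs when bumping versions. The sample line is hypothetical;
# only the mechanics mirror update_version_in_file.
def _version_substitution_example():
    sample = '__version__ = "4.28.0.dev0"\n'
    pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
    return pattern.sub('__version__ = "4.28.0"\n', sample)  # -> '__version__ = "4.28.0"\n'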
| 558
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowercase : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase : Optional[Any] = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
lowercase : Union[str, Any] = {
'unc-nlp/lxmert-base-uncased': 5_1_2,
}
lowercase : Dict = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class _lowerCAmelCase ( _snake_case ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = LxmertTokenizer
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : List[str]="[UNK]" , SCREAMING_SNAKE_CASE : Dict="[SEP]" , SCREAMING_SNAKE_CASE : Any="[PAD]" , SCREAMING_SNAKE_CASE : List[str]="[CLS]" , SCREAMING_SNAKE_CASE : Union[str, Any]="[MASK]" , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Any=None , **SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase = do_lower_case
def __A ( self : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=None ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
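# Illustrative sketch: loading and using the fast LXMERT tokenizer defined above.
# The checkpoint name comes from the pretrained maps in this file; the input
# sentence is hypothetical and the call requires network access to the Hub.
def _lxmert_tokenizer_example():
    from transformers import LxmertTokenizerFast

    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    encoding = tokenizer("A photo of a cat on a couch.")
    return encoding["input_ids"]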
| 701
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowercase : Dict = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowercase : List[str] = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def __a ( A__ ) -> int:
lowerCAmelCase = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=A__ )[0]
@deprecated(A__ , "Please use tf.data to implement this functionality." )
def __a ( A__ ) -> List[str]:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=A__ ) as bytestream:
lowerCAmelCase = _readaa(A__ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowerCAmelCase = _readaa(A__ )
lowerCAmelCase = _readaa(A__ )
lowerCAmelCase = _readaa(A__ )
lowerCAmelCase = bytestream.read(rows * cols * num_images )
lowerCAmelCase = numpy.frombuffer(A__ , dtype=numpy.uinta )
lowerCAmelCase = data.reshape(A__ , A__ , A__ , 1 )
return data
@deprecated(A__ , "Please use tf.one_hot on tensors." )
def __a ( A__ , A__ ) -> Tuple:
lowerCAmelCase = labels_dense.shape[0]
lowerCAmelCase = numpy.arange(A__ ) * num_classes
lowerCAmelCase = numpy.zeros((num_labels, num_classes) )
lowerCAmelCase = 1
return labels_one_hot
@deprecated(A__ , "Please use tf.data to implement this functionality." )
def __a ( A__ , A__=False , A__=10 ) -> Optional[int]:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=A__ ) as bytestream:
lowerCAmelCase = _readaa(A__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowerCAmelCase = _readaa(A__ )
lowerCAmelCase = bytestream.read(A__ )
lowerCAmelCase = numpy.frombuffer(A__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(A__ , A__ )
return labels
class _lowerCAmelCase :
"""simple docstring"""
@deprecated(
SCREAMING_SNAKE_CASE , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[int]=dtypes.floataa , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=None , ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCAmelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
lowerCAmelCase = 1_0_0_0_0
lowerCAmelCase = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"images.shape: {images.shape} labels.shape: {labels.shape}"
lowerCAmelCase = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCAmelCase = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCAmelCase = images.astype(numpy.floataa )
lowerCAmelCase = numpy.multiply(SCREAMING_SNAKE_CASE , 1.0 / 2_5_5.0 )
lowerCAmelCase = images
lowerCAmelCase = labels
lowerCAmelCase = 0
lowerCAmelCase = 0
@property
def __A ( self : Dict ) -> List[str]:
"""simple docstring"""
return self._images
@property
def __A ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return self._labels
@property
def __A ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._num_examples
@property
def __A ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self._epochs_completed
def __A ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Any=True ) -> Optional[Any]:
"""simple docstring"""
if fake_data:
lowerCAmelCase = [1] * 7_8_4
lowerCAmelCase = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(SCREAMING_SNAKE_CASE )],
[fake_label for _ in range(SCREAMING_SNAKE_CASE )],
)
lowerCAmelCase = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.images[perma]
lowerCAmelCase = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase = self._num_examples - start
lowerCAmelCase = self._images[start : self._num_examples]
lowerCAmelCase = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.images[perm]
lowerCAmelCase = self.labels[perm]
# Start next epoch
lowerCAmelCase = 0
lowerCAmelCase = batch_size - rest_num_examples
lowerCAmelCase = self._index_in_epoch
lowerCAmelCase = self._images[start:end]
lowerCAmelCase = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(A__ , "Please write your own downloading logic." )
def __a ( A__ , A__ , A__ ) -> Optional[Any]:
if not gfile.Exists(A__ ):
gfile.MakeDirs(A__ )
lowerCAmelCase = os.path.join(A__ , A__ )
if not gfile.Exists(A__ ):
urllib.request.urlretrieve(A__ , A__ ) # noqa: S310
with gfile.GFile(A__ ) as f:
lowerCAmelCase = f.size()
print("Successfully downloaded" , A__ , A__ , "bytes." )
return filepath
@deprecated(
A__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def __a ( A__ , A__=False , A__=False , A__=dtypes.floataa , A__=True , A__=5000 , A__=None , A__=DEFAULT_SOURCE_URL , ) -> List[Any]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=A__ , one_hot=A__ , dtype=A__ , seed=A__ )
lowerCAmelCase = fake()
lowerCAmelCase = fake()
lowerCAmelCase = fake()
return _Datasets(train=A__ , validation=A__ , test=A__ )
if not source_url: # empty string check
lowerCAmelCase = DEFAULT_SOURCE_URL
lowerCAmelCase = "train-images-idx3-ubyte.gz"
lowerCAmelCase = "train-labels-idx1-ubyte.gz"
lowerCAmelCase = "t10k-images-idx3-ubyte.gz"
lowerCAmelCase = "t10k-labels-idx1-ubyte.gz"
lowerCAmelCase = _maybe_download(
A__ , A__ , source_url + train_images_file )
with gfile.Open(A__ , "rb" ) as f:
lowerCAmelCase = _extract_images(A__ )
lowerCAmelCase = _maybe_download(
A__ , A__ , source_url + train_labels_file )
with gfile.Open(A__ , "rb" ) as f:
lowerCAmelCase = _extract_labels(A__ , one_hot=A__ )
lowerCAmelCase = _maybe_download(
A__ , A__ , source_url + test_images_file )
with gfile.Open(A__ , "rb" ) as f:
lowerCAmelCase = _extract_images(A__ )
lowerCAmelCase = _maybe_download(
A__ , A__ , source_url + test_labels_file )
with gfile.Open(A__ , "rb" ) as f:
lowerCAmelCase = _extract_labels(A__ , one_hot=A__ )
if not 0 <= validation_size <= len(A__ ):
lowerCAmelCase = (
"Validation size should be between 0 and "
f"{len(A__ )}. Received: {validation_size}."
)
raise ValueError(A__ )
lowerCAmelCase = train_images[:validation_size]
lowerCAmelCase = train_labels[:validation_size]
lowerCAmelCase = train_images[validation_size:]
lowerCAmelCase = train_labels[validation_size:]
lowerCAmelCase = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowerCAmelCase = _DataSet(A__ , A__ , **A__ )
lowerCAmelCase = _DataSet(A__ , A__ , **A__ )
lowerCAmelCase = _DataSet(A__ , A__ , **A__ )
return _Datasets(train=A__ , validation=A__ , test=A__ )
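# Illustrative sketch: the dense -> one-hot conversion implemented by
# _dense_to_one_hot above, shown on a tiny hypothetical label vector with plain
# NumPy so it can be checked by hand.
def _dense_to_one_hot_example():
    import numpy as np

    labels_dense = np.array([0, 2, 1])
    num_classes = 3
    index_offset = np.arange(labels_dense.shape[0]) * num_classes
    labels_one_hot = np.zeros((labels_dense.shape[0], num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot  # [[1, 0, 0], [0, 0, 1], [0, 1, 0]]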
| 159
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
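# Illustrative sketch: what the lazy module above exposes. The config import always
# works; the model classes resolve only when torch is installed. The construction
# below uses only default hyper-parameters, so no checkpoint is downloaded.
def _megatron_bert_config_example():
    from transformers import MegatronBertConfig

    config = MegatronBertConfig()
    return config.model_type, config.hidden_size  # ("megatron-bert", 1024)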
| 76
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
a_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( snake_case ):
def __init__( self , *UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Tuple:
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
__lowercase : Union[str, Any] = eval_examples
__lowercase : Union[str, Any] = post_process_function
__lowercase : Any = quant_trainer_args
__lowercase : Optional[Any] = 1_28 # default number of calibration samples
def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any:
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
__lowercase : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset
__lowercase : str = self._remove_unused_columns(UpperCamelCase_ , description='''Calibration''' )
return DataLoader(
UpperCamelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase_ , )
def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any:
__lowercase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
__lowercase : List[Any] = self.get_calib_dataloader(UpperCamelCase_ )
__lowercase : Dict = self.model
quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args , calib=UpperCamelCase_ )
model.eval()
quant_trainer.enable_calibration(UpperCamelCase_ )
logger.info('''***** Running calibration *****''' )
logger.info(F""" Num examples = {self.calib_num}""" )
logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(UpperCamelCase_ ):
# Prediction step
__lowercase ,__lowercase ,__lowercase : Optional[Any] = self.prediction_step(UpperCamelCase_ , UpperCamelCase_ , prediction_loss_only=UpperCamelCase_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCamelCase_ , self.quant_trainer_args )
__lowercase : Tuple = model
def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = "eval" ) -> str:
__lowercase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__lowercase : Union[str, Any] = self.get_eval_dataloader(UpperCamelCase_ )
__lowercase : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase : Optional[int] = self.compute_metrics
__lowercase : Dict = None
__lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase : Tuple = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , )
finally:
__lowercase : List[str] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__lowercase : int = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions )
__lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__lowercase : List[str] = metrics.pop(UpperCamelCase_ )
self.log(UpperCamelCase_ )
else:
__lowercase : Dict = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowercase : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_ = "test" ) -> List[Any]:
__lowercase : Optional[int] = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase : str = self.compute_metrics
__lowercase : Dict = None
__lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase : Union[str, Any] = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , )
finally:
__lowercase : Any = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowercase : Dict = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions , '''predict''' )
__lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__lowercase : List[str] = metrics.pop(UpperCamelCase_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_="./" ) -> int:
__lowercase : Optional[int] = self.eval_dataset
__lowercase : Optional[int] = self.get_eval_dataloader(UpperCamelCase_ )
__lowercase : Any = next(iter(UpperCamelCase_ ) )
# saving device - to make it consistent
__lowercase : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__lowercase : Tuple = tuple(v.to(UpperCamelCase_ ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__lowercase : List[Any] = True
__lowercase : int = self.model.to(UpperCamelCase_ )
model.eval()
model.float()
__lowercase : Optional[int] = model.module if hasattr(UpperCamelCase_ , '''module''' ) else model
quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args )
__lowercase : Tuple = os.path.join(UpperCamelCase_ , '''model.onnx''' )
logger.info(F"""exporting model to {output_model_file}""" )
__lowercase : Tuple = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , export_params=UpperCamelCase_ , opset_version=13 , do_constant_folding=UpperCamelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=UpperCamelCase_ , )
logger.info('''onnx export finished''' )
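# Illustrative sketch: the core of the ONNX export performed by save_onnx above,
# reduced to a tiny stand-in model so it runs without the QA pipeline or
# pytorch-quantization. The module, shapes and file name are hypothetical.
def _onnx_export_example(output_model_file="model.onnx"):
    import torch

    model = torch.nn.Linear(8, 2).eval()
    dummy_input = torch.randn(1, 8)
    axes = {0: "batch_size"}
    torch.onnx.export(
        model,
        (dummy_input,),
        output_model_file,
        export_params=True,
        opset_version=13,
        do_constant_folding=True,
        input_names=["input"],
        output_names=["logits"],
        dynamic_axes={"input": axes, "logits": axes},
    )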
| 76
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 706
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
A__ = jnp.intaa(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_snake_case , 'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ):
"""simple docstring"""
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_snake_case : Dict , _snake_case : str ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
return jnp.where(
_snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_snake_case , axis=-1 )
def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
return scores
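# Illustrative sketch: the nucleus (top-p) filtering idea implemented by the
# top-p warper above, written with plain NumPy on a single hypothetical score
# vector so it can be checked by hand.
def _top_p_example(scores, top_p=0.9, filter_value=-float("inf")):
    import numpy as np

    scores = np.asarray(scores, dtype=float)
    order = np.argsort(scores)[::-1]                     # token ids, best first
    probs = np.exp(scores[order] - scores[order].max())  # softmax over sorted scores
    probs /= probs.sum()
    keep = np.cumsum(probs) < top_p                      # tokens strictly inside the nucleus
    keep = np.roll(keep, 1)                              # also keep the token that crosses top_p
    keep[0] = True                                       # never drop the single best token
    filtered = np.full_like(scores, filter_value)
    filtered[order[keep]] = scores[order[keep]]
    return filtered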
| 52
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def lowerCAmelCase_ ( lowercase: np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def lowerCAmelCase_ ( lowercase: np.ndarray , lowercase: np.ndarray , lowercase: int ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase: Dict = np.nan
for i in range(lowercase ):
_UpperCamelCase: Any = features[:, labels == i]
_UpperCamelCase: Union[str, Any] = data.mean(1 )
# Centralize the data of class i
_UpperCamelCase: Dict = data - column_reshape(lowercase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(lowercase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_UpperCamelCase: int = np.dot(lowercase , centered_data.T )
return covariance_sum / features.shape[1]
def lowerCAmelCase_ ( lowercase: np.ndarray , lowercase: np.ndarray , lowercase: int ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase: List[str] = features.mean(1 )
_UpperCamelCase: List[str] = np.nan
for i in range(lowercase ):
_UpperCamelCase: int = features[:, labels == i]
_UpperCamelCase: List[str] = data.shape[1]
_UpperCamelCase: Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(lowercase ) - column_reshape(lowercase ) , (column_reshape(lowercase ) - column_reshape(lowercase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_UpperCamelCase: Dict = device_data * np.dot(
column_reshape(lowercase ) - column_reshape(lowercase ) , (column_reshape(lowercase ) - column_reshape(lowercase )).T , )
return covariance_sum / features.shape[1]
def lowerCAmelCase_ ( lowercase: np.ndarray , lowercase: int ) -> np.ndarray:
'''simple docstring'''
# Check if the features have been loaded
if features.any():
_UpperCamelCase: List[str] = features.mean(1 )
# Center the dataset
_UpperCamelCase: List[Any] = features - np.reshape(lowercase , (data_mean.size, 1) )
_UpperCamelCase: Tuple = np.dot(lowercase , centered_data.T ) / features.shape[1]
_UpperCamelCase , _UpperCamelCase: str = np.linalg.eigh(lowercase )
# Take all the columns in the reverse order (-1), and then takes only the first
_UpperCamelCase: List[str] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_UpperCamelCase: str = np.dot(filtered_eigenvectors.T , lowercase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=lowercase )
logging.error('''Dataset empty''' )
raise AssertionError
def lowerCAmelCase_ ( lowercase: np.ndarray , lowercase: np.ndarray , lowercase: int , lowercase: int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
_UpperCamelCase , _UpperCamelCase: Tuple = eigh(
covariance_between_classes(lowercase , lowercase , lowercase ) , covariance_within_classes(lowercase , lowercase , lowercase ) , )
_UpperCamelCase: Optional[Any] = eigenvectors[:, ::-1][:, :dimensions]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase: Tuple = np.linalg.svd(lowercase )
_UpperCamelCase: Any = svd_matrix[:, 0:dimensions]
_UpperCamelCase: int = np.dot(filtered_svd_matrix.T , lowercase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=lowercase )
logging.error('''Dataset empty''' )
raise AssertionError
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
# Create dummy dataset with 2 classes and 3 features
_UpperCamelCase: str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_UpperCamelCase: int = np.array([0, 0, 0, 1, 1] )
_UpperCamelCase: str = 2
_UpperCamelCase: List[str] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(lowercase ) as error_info:
_UpperCamelCase: str = linear_discriminant_analysis(
lowercase , lowercase , lowercase , lowercase )
if isinstance(lowercase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
_UpperCamelCase: Dict = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_UpperCamelCase: Optional[Any] = 2
_UpperCamelCase: str = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(lowercase ) as error_info:
_UpperCamelCase: List[str] = principal_component_analysis(lowercase , lowercase )
if not np.allclose(lowercase , lowercase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
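# Illustrative sketch: the covariance/eigendecomposition route to PCA used by
# principal_component_analysis above, on a tiny hypothetical (features x samples)
# matrix. Rows are features and columns are samples, matching the functions above.
def _pca_example():
    import numpy as np

    features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]])  # 2 features, 4 samples
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    eigenvalues, eigenvectors = np.linalg.eigh(covariance)  # eigenvalues in ascending order
    leading = eigenvectors[:, ::-1][:, :1]                  # top principal direction
    return leading.T @ centered                             # 1 x 4 projected data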
| 271
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , _lowercase : Optional[int] , _lowercase : Tuple=13 , _lowercase : str=7 , _lowercase : List[Any]=True , _lowercase : Optional[int]=True , _lowercase : str=True , _lowercase : Optional[int]=True , _lowercase : Dict=99 , _lowercase : List[Any]=32 , _lowercase : List[str]=5 , _lowercase : str=4 , _lowercase : int=37 , _lowercase : List[str]="gelu" , _lowercase : Any=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : Dict=512 , _lowercase : int=16 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : List[Any]=4 , ):
"""simple docstring"""
_UpperCamelCase: List[str] = parent
_UpperCamelCase: int = batch_size
_UpperCamelCase: List[str] = seq_length
_UpperCamelCase: Optional[int] = is_training
_UpperCamelCase: Optional[Any] = use_attention_mask
_UpperCamelCase: Any = use_token_type_ids
_UpperCamelCase: List[str] = use_labels
_UpperCamelCase: Optional[int] = vocab_size
_UpperCamelCase: List[str] = hidden_size
_UpperCamelCase: Union[str, Any] = num_hidden_layers
_UpperCamelCase: Any = num_attention_heads
_UpperCamelCase: List[str] = intermediate_size
_UpperCamelCase: Union[str, Any] = hidden_act
_UpperCamelCase: Dict = hidden_dropout_prob
_UpperCamelCase: List[str] = attention_probs_dropout_prob
_UpperCamelCase: str = max_position_embeddings
_UpperCamelCase: Dict = type_vocab_size
_UpperCamelCase: Tuple = type_sequence_label_size
_UpperCamelCase: List[Any] = initializer_range
_UpperCamelCase: str = num_choices
def lowerCAmelCase ( self : str ):
"""simple docstring"""
_UpperCamelCase: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase: Any = None
if self.use_attention_mask:
_UpperCamelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase: Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase: Tuple = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: List[Any] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase: int = config_and_inputs
_UpperCamelCase: int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __magic_name__ ( __a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase : Any = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCamelCase: int = model_class_name.from_pretrained('''albert-base-v2''' )
_UpperCamelCase: List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_UpperCamelCase: List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_UpperCamelCase: Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase: Optional[int] = model(_lowercase , attention_mask=_lowercase )[0]
_UpperCamelCase: Tuple = (1, 11, 768)
self.assertEqual(output.shape , _lowercase )
_UpperCamelCase: Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
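# Illustrative sketch: the integration check above reduced to a minimal forward pass.
# Requires flax/jax and network access to the Hub; the all-ones input ids are hypothetical.
def _flax_albert_forward_example():
    import numpy as onp
    from transformers import FlaxAlbertModel

    model = FlaxAlbertModel.from_pretrained("albert-base-v2")
    input_ids = onp.ones((1, 11), dtype="i4")
    outputs = model(input_ids)
    return outputs[0].shape  # (1, 11, 768)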
| 271
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = RobertaTokenizer
__snake_case = RobertaTokenizerFast
__snake_case = True
__snake_case = {"""cls_token""": """<s>"""}
def _snake_case ( self: List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase : int = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__lowerCamelCase : Optional[Any] = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__lowerCamelCase : int = {'unk_token': '<unk>'}
__lowerCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a ) )
def _snake_case ( self: Tuple , **a: Optional[int] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Optional[Any] , **a: List[str] ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Optional[int] , a: Union[str, Any] ):
__lowerCamelCase : Tuple = 'lower newer'
__lowerCamelCase : Optional[Any] = 'lower newer'
return input_text, output_text
def _snake_case ( self: Tuple ):
__lowerCamelCase : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase : Tuple = 'lower newer'
__lowerCamelCase : Dict = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__lowerCamelCase : Optional[Any] = tokenizer.tokenize(a ) # , add_prefix_space=True)
self.assertListEqual(a , a )
__lowerCamelCase : Optional[int] = tokens + [tokenizer.unk_token]
__lowerCamelCase : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=a ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=a ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained('roberta-base' )
__lowerCamelCase : str = tokenizer.encode('sequence builders' , add_special_tokens=a )
__lowerCamelCase : List[str] = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
__lowerCamelCase : List[str] = tokenizer.encode(
'sequence builders' , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase : Tuple = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a )
__lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Dict = self.get_tokenizer()
__lowerCamelCase : Any = 'Encode this sequence.'
__lowerCamelCase : str = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
__lowerCamelCase : List[Any] = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a , a )
__lowerCamelCase : Any = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a , a )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
__lowerCamelCase : Tuple = tokenizer.encode(a , add_special_tokens=a )
__lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a , a )
# Testing spaces after special tokens
__lowerCamelCase : str = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(a , lstrip=a , rstrip=a )} ) # mask token has a left space
__lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
__lowerCamelCase : List[str] = 'Encode <mask> sequence'
__lowerCamelCase : Any = 'Encode <mask>sequence'
__lowerCamelCase : Any = tokenizer.encode(a )
__lowerCamelCase : Tuple = encoded.index(a )
__lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a , a )
__lowerCamelCase : Any = tokenizer.encode(a )
__lowerCamelCase : List[Any] = encoded.index(a )
__lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a , a )
def _snake_case ( self: Any ):
pass
def _snake_case ( self: int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : str = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : int = 'A, <mask> AllenNLP sentence.'
__lowerCamelCase : int = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
__lowerCamelCase : Any = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__lowerCamelCase : int = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__lowerCamelCase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _snake_case ( self: str ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowerCamelCase : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , a )
self.assertEqual(post_processor_state['add_prefix_space'] , a )
self.assertEqual(post_processor_state['trim_offsets'] , a )
def _snake_case ( self: List[str] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : Tuple = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
__lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : str = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : Optional[int] = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : Tuple = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
__lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : List[str] = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
__lowerCamelCase : int = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : Optional[Any] = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
__lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : List[Any] = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
__lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase : Optional[Any] = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
| 230
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 230
| 1
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase :
def __init__( self : Any , _lowercase : Dict , _lowercase : int=2 , _lowercase : Any=32 , _lowercase : Tuple=16 , _lowercase : List[Any]=3 , _lowercase : List[Any]=True , _lowercase : str=True , _lowercase : int=32 , _lowercase : Optional[Any]=4 , _lowercase : Optional[Any]=[0, 1, 2, 3] , _lowercase : Union[str, Any]=4 , _lowercase : List[Any]=37 , _lowercase : int="gelu" , _lowercase : List[Any]=0.1 , _lowercase : str=0.1 , _lowercase : List[str]=0.02 , _lowercase : Union[str, Any]=3 , _lowercase : List[Any]=[1, 3_84, 24, 24] , _lowercase : Union[str, Any]=True , _lowercase : int=None , ):
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : str = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = patch_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_channels
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = backbone_out_indices
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = backbone_featmap_shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scope
SCREAMING_SNAKE_CASE__ : Tuple = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ : List[str] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Optional[int] = num_patches + 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowercase__ ( self : str , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = DPTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[str] , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = DPTForDepthEstimation(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowercase__ ( self : Tuple , _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = DPTForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase : Optional[int] = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase : List[str] = False
lowerCamelCase : List[str] = False
lowerCamelCase : str = False
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Dict = DPTModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def lowercase__ ( self : List[str] ):
pass
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
def lowercase__ ( self : List[Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = True
if model_class in get_values(_lowercase ):
continue
SCREAMING_SNAKE_CASE__ : Dict = model_class(_lowercase )
model.to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowercase ).loss
loss.backward()
def lowercase__ ( self : Optional[Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : List[Any] = True
if model_class in get_values(_lowercase ) or not model_class.supports_gradient_checkpointing:
continue
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase )
model.to(_lowercase )
model.gradient_checkpointing_enable()
model.train()
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowercase ).loss
loss.backward()
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(config=_lowercase )
# Skip the check for the backbone
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
SCREAMING_SNAKE_CASE__ : Dict = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
def lowercase__ ( self : Tuple ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
SCREAMING_SNAKE_CASE__ : List[Any] = DPTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowercase__ ( self : Optional[int] ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''add'''
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = DPTForDepthEstimation(_lowercase )
def a ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.predicted_depth
# verify the predicted depth
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , _lowercase , atol=1E-4 ) )
| 35
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 135
| 0
|
def normality(moles: float, volume: float, nfactor: float) -> float:
    """Normality of a solution: molarity (moles / volume) multiplied by the n-factor."""
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> float:
    """Pressure from the ideal gas law PV = nRT, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / volume))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> float:
    """Volume from the ideal gas law PV = nRT, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / pressure))


def temperature_of_gas_system(pressure: float, volume: float, moles: float) -> float:
    """Temperature from the ideal gas law PV = nRT, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
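

# Illustrative usage of the helpers above (a minimal sketch; the descriptive
# function names are assumptions introduced while repairing the broken
# signatures, not part of any published API):
if __name__ == "__main__":
    # 2 mol of an ideal gas at 300 K in a 10 L vessel -> ~5 atm after rounding
    print(pressure_of_gas_system(2, 300, 10))
    # the same amount of gas held at 5 atm occupies ~10 L after rounding
    print(volume_of_gas_system(2, 300, 5))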
| 675
|
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.global_variables_initializer()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
| 675
| 1
|
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubbles the largest remaining element to the end each pass."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
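

# Illustrative usage (a minimal sketch of the recursive bubble sort above; the
# sample list is made up for demonstration):
if __name__ == "__main__":
    sample = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    print(bubble_sort(sample))  # [17, 20, 26, 31, 44, 54, 55, 77, 93]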
| 72
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = RoCBertTokenizer
_snake_case : int = None
_snake_case : Optional[Any] = False
_snake_case : Tuple = True
_snake_case : Union[str, Any] = filter_non_english
def A ( self : List[str] )-> Dict:
super().setUp()
__UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
__UpperCamelCase = {}
__UpperCamelCase = {}
for i, value in enumerate(A_ ):
__UpperCamelCase = i
__UpperCamelCase = i
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def A ( self : Dict )-> Optional[Any]:
__UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(A_ , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def A ( self : List[Any] )-> Dict:
__UpperCamelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def A ( self : str )-> Dict:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : Union[str, Any] )-> Optional[Any]:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def A ( self : Any )-> Optional[Any]:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : int )-> Optional[Any]:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def A ( self : List[Any] )-> int:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : Optional[Any] )-> str:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : Any )-> int:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=A_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def A ( self : int )-> int:
__UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__UpperCamelCase = {}
for i, token in enumerate(A_ ):
__UpperCamelCase = i
__UpperCamelCase = RoCBertWordpieceTokenizer(vocab=A_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def A ( self : int )-> Tuple:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def A ( self : Optional[int] )-> Union[str, Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def A ( self : str )-> str:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def A ( self : List[str] )-> Dict:
__UpperCamelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
__UpperCamelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def A ( self : str )-> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
__UpperCamelCase = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase = tokenizer_r.do_lower_case if hasattr(A_ , "do_lower_case" ) else False
__UpperCamelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def A ( self : Tuple )-> Union[str, Any]:
__UpperCamelCase = ["的", "人", "有"]
__UpperCamelCase = "".join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase = True
__UpperCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase = False
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase = tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def A ( self : List[Any] )-> int:
__UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase = tokenizer.encode("你好" , add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode("你是谁" , add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def A ( self : Optional[Any] )-> Tuple:
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__UpperCamelCase = "你好,你是谁"
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase = tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 505
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :List[str] = 'roc_bert'
def __init__( self , __UpperCAmelCase=3_0_5_2_2 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=9_1_0 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=2_4_8_5_8 , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :str = vocab_size
lowerCAmelCase__ :Tuple = max_position_embeddings
lowerCAmelCase__ :List[Any] = hidden_size
lowerCAmelCase__ :Tuple = num_hidden_layers
lowerCAmelCase__ :str = num_attention_heads
lowerCAmelCase__ :int = intermediate_size
lowerCAmelCase__ :Union[str, Any] = hidden_act
lowerCAmelCase__ :List[str] = hidden_dropout_prob
lowerCAmelCase__ :str = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = initializer_range
lowerCAmelCase__ :Optional[Any] = type_vocab_size
lowerCAmelCase__ :Optional[int] = layer_norm_eps
lowerCAmelCase__ :Dict = use_cache
lowerCAmelCase__ :str = enable_pronunciation
lowerCAmelCase__ :Union[str, Any] = enable_shape
lowerCAmelCase__ :List[str] = pronunciation_embed_dim
lowerCAmelCase__ :Union[str, Any] = pronunciation_vocab_size
lowerCAmelCase__ :Union[str, Any] = shape_embed_dim
lowerCAmelCase__ :str = shape_vocab_size
lowerCAmelCase__ :List[Any] = concat_input
lowerCAmelCase__ :Any = position_embedding_type
lowerCAmelCase__ :Dict = classifier_dropout
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
| 718
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__A = logging.get_logger(__name__)
enable_full_determinism()
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :str = UNetaDModel
__magic_name__ :Tuple = """sample"""
@property
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = 4
lowerCAmelCase__ :Dict = 3
lowerCAmelCase__ :int = (3_2, 3_2)
lowerCAmelCase__ :List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Any = torch.tensor([1_0] ).to(__UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = {
'block_out_channels': (3_2, 6_4),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 3_2,
}
lowerCAmelCase__ :int = self.dummy_input
return init_dict, inputs_dict
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :str = UNetaDModel
__magic_name__ :List[str] = """sample"""
@property
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = 4
lowerCAmelCase__ :List[Any] = 4
lowerCAmelCase__ :str = (3_2, 3_2)
lowerCAmelCase__ :Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = torch.tensor([1_0] ).to(__UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def snake_case ( self ):
'''simple docstring'''
return (4, 3_2, 3_2)
@property
def snake_case ( self ):
'''simple docstring'''
return (4, 3_2, 3_2)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = {
'sample_size': 3_2,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (3_2, 6_4),
'attention_head_dim': 3_2,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
lowerCAmelCase__ :Dict = self.dummy_input
return init_dict, inputs_dict
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :int = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase )
model_accelerate.to(__UpperCAmelCase )
model_accelerate.eval()
lowerCAmelCase__ :List[str] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase__ :List[str] = noise.to(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = torch.tensor([1_0] * noise.shape[0] ).to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model_accelerate(__UpperCAmelCase , __UpperCAmelCase )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase , low_cpu_mem_usage=__UpperCAmelCase )
model_normal_load.to(__UpperCAmelCase )
model_normal_load.eval()
lowerCAmelCase__ :Optional[int] = model_normal_load(__UpperCAmelCase , __UpperCAmelCase )['sample']
assert torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase__ :int = noise.to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = torch.tensor([1_0] * noise.shape[0] ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , __UpperCAmelCase ).sample
lowerCAmelCase__ :List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase__ :Tuple = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) )
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = UNetaDModel
__magic_name__ :Optional[int] = """sample"""
@property
def snake_case ( self , __UpperCAmelCase=(3_2, 3_2) ):
'''simple docstring'''
lowerCAmelCase__ :Dict = 4
lowerCAmelCase__ :int = 3
lowerCAmelCase__ :Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=__UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def snake_case ( self ):
'''simple docstring'''
return (3, 3_2, 3_2)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = {
'block_out_channels': [3_2, 6_4, 6_4, 6_4],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
lowerCAmelCase__ :Any = self.dummy_input
return init_dict, inputs_dict
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.dummy_input
lowerCAmelCase__ :Union[str, Any] = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = noise
lowerCAmelCase__ :List[Any] = model(**__UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = 4
lowerCAmelCase__ :Any = 3
lowerCAmelCase__ :Dict = (2_5_6, 2_5_6)
lowerCAmelCase__ :int = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :Any = torch.tensor(batch_size * [1E-4] ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :str = model(__UpperCAmelCase , __UpperCAmelCase ).sample
lowerCAmelCase__ :Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ :int = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = 4
lowerCAmelCase__ :List[Any] = 3
lowerCAmelCase__ :Dict = (3_2, 3_2)
lowerCAmelCase__ :Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = torch.tensor(batch_size * [1E-4] ).to(__UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Optional[Any] = model(__UpperCAmelCase , __UpperCAmelCase ).sample
lowerCAmelCase__ :Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ :Any = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) )
def snake_case ( self ):
'''simple docstring'''
pass
| 560
| 0
|
# flake8: noqa
# Lint as: python3
UpperCamelCase = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 45
|
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph (given as an adjacency-list dict) contains a cycle."""
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a cycle when it reaches a vertex already on the recursion stack."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
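

# Illustrative usage (a minimal sketch; the adjacency-list graphs below are made up):
if __name__ == "__main__":
    cyclic = {0: [1], 1: [2], 2: [0]}
    acyclic = {0: [1, 2], 1: [2], 2: []}
    print(check_cycle(cyclic))   # True
    print(check_cycle(acyclic))  # False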
| 441
| 0
|
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: returns the longest palindromic substring in linear time."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, then update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
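

# Illustrative usage (a minimal sketch of the Manacher's-algorithm helper above):
if __name__ == "__main__":
    print(palindromic_string("abbbaba"))          # abbba
    print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg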
| 715
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Optional[int] = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 453
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt template for a repo, or returns the prompt itself if it already contains whitespace."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 305
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = KandinskyVaaPipeline
lowercase_ = [
"""image_embeds""",
"""negative_image_embeds""",
]
lowercase_ = ["""image_embeds""", """negative_image_embeds"""]
lowercase_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase_ = False
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 3_2
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 3_2
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 1_0_0
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A ={
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__A =UNetaDConditionModel(**lowercase__ )
return model
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.dummy_unet
__A =self.dummy_movq
__A =DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase__ , )
__A ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self , lowercase__ , lowercase__=0 ):
'''simple docstring'''
__A =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
__A =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase__ )
if str(lowercase__ ).startswith('''mps''' ):
__A =torch.manual_seed(lowercase__ )
else:
__A =torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__A ={
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self ):
'''simple docstring'''
__A ='''cpu'''
__A =self.get_dummy_components()
__A =self.pipeline_class(**lowercase__ )
__A =pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__A =pipe(**self.get_dummy_inputs(lowercase__ ) )
__A =output.images
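# the second call returns a tuple instead of a dict; both outputs are compared against the same expected slice below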
__A =pipe(
**self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0]
__A =image[0, -3:, -3:, -1]
__A =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__A =np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
__A =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase__ )
__A =KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
__A =pipeline.to(lowercase__ )
pipeline.set_progress_bar_config(disable=lowercase__ )
__A ='''red cat, 4k photo'''
__A =torch.Generator(device='''cuda''' ).manual_seed(0 )
__A , __A =pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__A =torch.Generator(device='''cuda''' ).manual_seed(0 )
__A =pipeline(
image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=1_0_0 , output_type='''np''' , )
__A =output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
| 184
| 0
|
def snake_case__ ( list_data , key , left = 0 , right = 0 ) -> int:
'''simple docstring'''
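# recursive search that checks both ends of list_data and narrows the window by one element on each side per call; returns the index of key or -1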
right = right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return snake_case__(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''spiece.model'''}
a__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
a__ = {'''bert_for_seq_generation''': 512}
class __magic_name__( __lowerCAmelCase ):
UpperCAmelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[int] = []
UpperCAmelCase_ : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Any="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : List[Any]="<unk>" , __UpperCamelCase : Union[str, Any]="<pad>" , __UpperCamelCase : Optional[Any]="<::::>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : Dict , ):
'''simple docstring'''
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case__ = vocab_file
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
@property
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
snake_case__ = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ):
'''simple docstring'''
snake_case__ = self.__dict__.copy()
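# sp_model (a SentencePiece processor) cannot be pickled, hence the custom __getstate__/__setstate__ handling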
snake_case__ = None
return state
def __setstate__( self : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase( self : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __lowerCAmelCase( self : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
return self.sp_model.piece_to_id(__UpperCamelCase )
def __lowerCAmelCase( self : str , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case__ = self.sp_model.IdToPiece(__UpperCamelCase )
return token
def __lowerCAmelCase( self : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case__ = []
snake_case__ = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCamelCase ) + token
snake_case__ = []
else:
current_sub_tokens.append(__UpperCamelCase )
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string.strip()
def __lowerCAmelCase( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 566
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
if "cls_token" in name:
UpperCAmelCase__ : Any = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
UpperCAmelCase__ : Optional[Any] = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
UpperCAmelCase__ : int = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
UpperCAmelCase__ : List[str] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
UpperCAmelCase__ : List[str] = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCAmelCase__ : List[str] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
UpperCAmelCase__ : Tuple = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
UpperCAmelCase__ : Union[str, Any] = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
UpperCAmelCase__ : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCAmelCase__ : List[Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCAmelCase__ : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCAmelCase__ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCAmelCase__ : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCAmelCase__ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
UpperCAmelCase__ : Optional[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
UpperCAmelCase__ : List[str] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
UpperCAmelCase__ : List[Any] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
UpperCAmelCase__ : Any = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
UpperCAmelCase__ : List[str] = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ : Tuple = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
UpperCAmelCase__ : Optional[int] = key.split(""".""" )
UpperCAmelCase__ : Union[str, Any] = int(key_split[1] )
if "decoder_blocks" in key:
UpperCAmelCase__ : Optional[int] = config.decoder_hidden_size
UpperCAmelCase__ : Optional[int] = """decoder.decoder_layers."""
if "weight" in key:
UpperCAmelCase__ : Union[str, Any] = val[:dim, :]
UpperCAmelCase__ : Any = val[dim : dim * 2, :]
UpperCAmelCase__ : Optional[Any] = val[-dim:, :]
elif "bias" in key:
UpperCAmelCase__ : Optional[int] = val[:dim]
UpperCAmelCase__ : Any = val[dim : dim * 2]
UpperCAmelCase__ : List[Any] = val[-dim:]
else:
UpperCAmelCase__ : Union[str, Any] = config.hidden_size
UpperCAmelCase__ : List[str] = """vit.encoder.layer."""
if "weight" in key:
UpperCAmelCase__ : int = val[:dim, :]
UpperCAmelCase__ : Dict = val[dim : dim * 2, :]
UpperCAmelCase__ : Any = val[-dim:, :]
elif "bias" in key:
UpperCAmelCase__ : str = val[:dim]
UpperCAmelCase__ : Tuple = val[dim : dim * 2]
UpperCAmelCase__ : Dict = val[-dim:]
else:
UpperCAmelCase__ : Tuple = val
return orig_state_dict
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCAmelCase__ : int = 1024
UpperCAmelCase__ : List[str] = 4096
UpperCAmelCase__ : List[Any] = 24
UpperCAmelCase__ : Any = 16
elif "huge" in checkpoint_url:
UpperCAmelCase__ : List[str] = 14
UpperCAmelCase__ : int = 1280
UpperCAmelCase__ : int = 5120
UpperCAmelCase__ : int = 32
UpperCAmelCase__ : List[Any] = 16
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
UpperCAmelCase__ : Any = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location="""cpu""" )["""model"""]
UpperCAmelCase__ : Dict = ViTMAEImageProcessor(size=config.image_size )
UpperCAmelCase__ : Optional[Any] = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
UpperCAmelCase__ : int = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
UpperCAmelCase__ : Tuple = ViTMAEImageProcessor(size=config.image_size )
UpperCAmelCase__ : List[Any] = image_processor(images=__UpperCamelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
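# fix the random masking so the logits are reproducible and comparable to the hard-coded expected slices below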
UpperCAmelCase__ : Any = model(**__UpperCamelCase )
UpperCAmelCase__ : int = outputs.logits
if "large" in checkpoint_url:
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCAmelCase__ : List[str] = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 65
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_UpperCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCamelCase : Tuple = 2_56
class _lowercase( _lowerCamelCase ):
"""simple docstring"""
__lowerCamelCase = ['''melgan''']
def __init__( self: List[str] ,a: SpectrogramNotesEncoder ,a: SpectrogramContEncoder ,a: TaFilmDecoder ,a: DDPMScheduler ,a: OnnxRuntimeModel if is_onnx_available() else Any ,):
super().__init__()
# From MELGAN
__UpperCAmelCase = math.log(1e-5 ) # Matches MelGAN training.
__UpperCAmelCase = 4.0 # Largest value for most examples
__UpperCAmelCase = 128
self.register_modules(
notes_encoder=a ,continuous_encoder=a ,decoder=a ,scheduler=a ,melgan=a ,)
def snake_case ( self: List[str] ,a: Union[str, Any] ,a: Optional[Any]=(-1.0, 1.0) ,a: Optional[int]=False ):
__UpperCAmelCase , __UpperCAmelCase = output_range
if clip:
__UpperCAmelCase = torch.clip(a ,self.min_value ,self.max_value )
# Scale to [0, 1].
__UpperCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def snake_case ( self: List[Any] ,a: List[str] ,a: int=(-1.0, 1.0) ,a: Optional[int]=False ):
__UpperCAmelCase , __UpperCAmelCase = input_range
__UpperCAmelCase = torch.clip(a ,a ,a ) if clip else outputs
# Scale to [0, 1].
__UpperCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def snake_case ( self: Optional[int] ,a: Any ,a: Optional[Any] ,a: Optional[Any] ):
__UpperCAmelCase = input_tokens > 0
__UpperCAmelCase , __UpperCAmelCase = self.notes_encoder(
encoder_input_tokens=a ,encoder_inputs_mask=a )
__UpperCAmelCase , __UpperCAmelCase = self.continuous_encoder(
encoder_inputs=a ,encoder_inputs_mask=a )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def snake_case ( self: Optional[int] ,a: int ,a: str ,a: Dict ):
__UpperCAmelCase = noise_time
if not torch.is_tensor(a ):
__UpperCAmelCase = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device )
elif torch.is_tensor(a ) and len(timesteps.shape ) == 0:
__UpperCAmelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device )
__UpperCAmelCase = self.decoder(
encodings_and_masks=a ,decoder_input_tokens=a ,decoder_noise_time=a )
return logits
@torch.no_grad()
def __call__( self: str ,a: List[List[int]] ,a: Optional[torch.Generator] = None ,a: int = 100 ,a: bool = True ,a: str = "numpy" ,a: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,a: int = 1 ,):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a ,a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a )}.""" )
__UpperCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.floataa )
__UpperCAmelCase = np.zeros([1, 0, self.n_dims] ,np.floataa )
__UpperCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=a ,device=self.device )
for i, encoder_input_tokens in enumerate(a ):
if i == 0:
__UpperCAmelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device ,dtype=self.decoder.dtype )
# The first chunk has no previous context.
__UpperCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=a ,device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__UpperCAmelCase = ones
__UpperCAmelCase = self.scale_features(
a ,output_range=[-1.0, 1.0] ,clip=a )
__UpperCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,continuous_inputs=a ,continuous_mask=a ,)
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__UpperCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape ,generator=a ,device=self.device ,dtype=self.decoder.dtype ,)
# set step values
self.scheduler.set_timesteps(a )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCAmelCase = self.decode(
encodings_and_masks=a ,input_tokens=a ,noise_time=t / self.scheduler.config.num_train_timesteps ,)
# Compute previous output: x_t -> x_t-1
__UpperCAmelCase = self.scheduler.step(a ,a ,a ,generator=a ).prev_sample
__UpperCAmelCase = self.scale_to_features(a ,input_range=[-1.0, 1.0] )
__UpperCAmelCase = mel[:1]
__UpperCAmelCase = mel.cpu().float().numpy()
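# append this chunk's predicted mel spectrogram to the running full-song output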
__UpperCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a ,a )
logger.info('Generated segment' ,a )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
__UpperCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__UpperCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=a )
| 396
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args( unknown_args ):
'''simple docstring'''
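# pair up "--name value" tokens from the unknown args into a keyword-argument dict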
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2], unknown_args[1::2] )}
def main( ):
'''simple docstring'''
parser = ArgumentParser(
'''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False )
commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser )
EnvironmentCommand.register_subcommand(commands_parser )
TestCommand.register_subcommand(commands_parser )
RunBeamCommand.register_subcommand(commands_parser )
DummyDataCommand.register_subcommand(commands_parser )
# Parse args
args , unknown_args = parser.parse_known_args()
if not hasattr(args, '''func''' ):
parser.print_help()
exit(1 )
kwargs = parse_unknown_args(unknown_args )
# Run
service = args.func(args, **kwargs )
service.run()
if __name__ == "__main__":
main()
| 711
|
"""simple docstring"""
def _lowerCamelCase ( number ):
if not isinstance(number, int ):
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer'
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 1:
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0'
raise ValueError(SCREAMING_SNAKE_CASE_ )
current_number = 1
for i in range(1, number ):
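# each step applies the Catalan recurrence C(i) = C(i-1) * (4*i - 2) // (i + 1)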
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 628
| 0
|