code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __lowerCamelCase ( lowerCamelCase__ : str ):
'''simple docstring'''
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=lowerCamelCase__ )
lowerCamelCase = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase__ )
EnvironmentCommand.register_subcommand(lowerCamelCase__ )
TestCommand.register_subcommand(lowerCamelCase__ )
RunBeamCommand.register_subcommand(lowerCamelCase__ )
DummyDataCommand.register_subcommand(lowerCamelCase__ )
# Parse args
lowerCamelCase , lowerCamelCase = parser.parse_known_args()
if not hasattr(lowerCamelCase__ , """func""" ):
parser.print_help()
exit(1 )
lowerCamelCase = parse_unknown_args(lowerCamelCase__ )
# Run
lowerCamelCase = args.func(lowerCamelCase__ , **lowerCamelCase__ )
service.run()
if __name__ == "__main__":
main()
| 252
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = 1
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(A )
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
def extract(*A , **A ):
class __lowercase :
"""simple docstring"""
def __init__( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = torch.ones([0] )
def __A ( self , A ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(A )
return self
return Out()
return extract
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=A , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=A , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=A )
assert isinstance(A , A )
assert isinstance(pipe.scheduler , A )
assert pipe.safety_checker is None
lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
lowerCamelCase = StableDiffusionPipeline.from_pretrained(A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowerCamelCase = unet.half()
lowerCamelCase = vae.half()
lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowerCamelCase = 40_03_66_03_46
lowerCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowerCamelCase = 27_34_97_17_55
lowerCamelCase = 7
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowerCamelCase = 10_44_35_52_34
lowerCamelCase = 12
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 252
| 1
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _snake_case ( _snake_case : int ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _snake_case ( ):
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCAmelCase : Optional[Any] = [1, 2, 3]
with pytest.raises(snake_case_ ):
with parallel_backend('''unsupported backend''' ):
map_nested(snake_case_ , snake_case_ , num_proc=2 )
with pytest.raises(snake_case_ ):
with parallel_backend('''unsupported backend''' ):
map_nested(snake_case_ , snake_case_ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : List[Any] = [1, 2]
lowerCAmelCase : List[Any] = {"""a""": 1, """b""": 2}
lowerCAmelCase : List[str] = {"""a""": [1, 2], """b""": [3, 4]}
lowerCAmelCase : Tuple = {"""a""": {"""1""": 1}, """b""": 2}
lowerCAmelCase : Any = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
lowerCAmelCase : Optional[int] = [2, 3]
lowerCAmelCase : List[str] = {"""a""": 2, """b""": 3}
lowerCAmelCase : Any = {"""a""": [2, 3], """b""": [4, 5]}
lowerCAmelCase : Tuple = {"""a""": {"""1""": 2}, """b""": 3}
lowerCAmelCase : List[str] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend('''spark''' ):
assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) == expected_map_nested_sa
| 355
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : int = -1
lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : str = TextStreamer(UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Any = -1
lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ )
lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
lowerCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = -1
lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = -1
lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : str = -1
lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 )
lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : List[str] = ''''''
for new_text in streamer:
streamer_text += new_text
| 314
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class UpperCamelCase ( __snake_case ):
UpperCamelCase : int = '''data2vec-vision'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : str=0.0_2 , UpperCAmelCase__ : str=1E-12 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : int=False , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : int=[3, 5, 7, 11] , UpperCAmelCase__ : Tuple=[1, 2, 3, 6] , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=0.4 , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Union[str, Any]=255 , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]:
super().__init__(**UpperCAmelCase__ )
_a : Tuple = hidden_size
_a : Optional[int] = num_hidden_layers
_a : Union[str, Any] = num_attention_heads
_a : Dict = intermediate_size
_a : Any = hidden_act
_a : Optional[int] = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : List[Any] = initializer_range
_a : Any = layer_norm_eps
_a : Any = image_size
_a : Union[str, Any] = patch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = use_mask_token
_a : int = use_absolute_position_embeddings
_a : List[str] = use_relative_position_bias
_a : List[Any] = use_shared_relative_position_bias
_a : str = layer_scale_init_value
_a : int = drop_path_rate
_a : Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
_a : Tuple = out_indices
_a : Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
_a : int = use_auxiliary_head
_a : List[Any] = auxiliary_loss_weight
_a : str = auxiliary_channels
_a : Union[str, Any] = auxiliary_num_convs
_a : List[Any] = auxiliary_concat_input
_a : Dict = semantic_loss_ignore_index
class UpperCamelCase ( __snake_case ):
UpperCamelCase : int = version.parse('''1.11''' )
@property
def _lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowercase ( self : str ) -> float:
return 1E-4
| 294
|
'''simple docstring'''
import os
import sys
import unittest
lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : List[Any] = {'''BertModelTest''': '''BertModelTester'''}
A : int = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : List[str] = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
A : Union[str, Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Dict = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
A : str = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
| 3
| 0
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE :
snake_case__ : str
snake_case__ : str = None
@staticmethod
def _A ( ):
raise NotImplementedError
def _A ( self : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ):
raise NotImplementedError
def _A ( self : str , __lowerCamelCase : List[Any] ):
raise NotImplementedError
def _A ( self : Dict ):
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def _A ( cls : Optional[Any] ):
return F"""`pip install {cls.pip_package or cls.name}`"""
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Union[str, Any] = """optuna"""
@staticmethod
def _A ( ):
return is_optuna_available()
def _A ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : str , **__lowerCamelCase : Dict ):
return run_hp_search_optuna(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def _A ( self : int , __lowerCamelCase : Tuple ):
return default_hp_space_optuna(__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Optional[Any] = """ray"""
snake_case__ : Optional[int] = """'ray[tune]'"""
@staticmethod
def _A ( ):
return is_ray_available()
def _A ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : str , **__lowerCamelCase : str ):
return run_hp_search_ray(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def _A ( self : List[Any] , __lowerCamelCase : Any ):
return default_hp_space_ray(__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Optional[Any] = """sigopt"""
@staticmethod
def _A ( ):
return is_sigopt_available()
def _A ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : str , **__lowerCamelCase : Any ):
return run_hp_search_sigopt(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def _A ( self : Optional[int] , __lowerCamelCase : Tuple ):
return default_hp_space_sigopt(__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Dict = """wandb"""
@staticmethod
def _A ( ):
return is_wandb_available()
def _A ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : str , **__lowerCamelCase : Any ):
return run_hp_search_wandb(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def _A ( self : Union[str, Any] , __lowerCamelCase : Tuple ):
return default_hp_space_wandb(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def SCREAMING_SNAKE_CASE_ ( ) -> str:
"""simple docstring"""
UpperCamelCase :int = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__magic_name__ ) > 0:
UpperCamelCase :Optional[Any] = available_backends[0].name
if len(__magic_name__ ) > 1:
logger.info(
f"""{len(__magic_name__ )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 62
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
UpperCAmelCase_ : List[Any] = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
UpperCAmelCase_ : List[str] = '''▁'''
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Optional[int] = VOCAB_FILES_NAMES
snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : str="<mask>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase :int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
UpperCamelCase :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
UpperCamelCase :Union[str, Any] = vocab_file
UpperCamelCase :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
UpperCamelCase :Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCamelCase :Tuple = len(self.sp_model ) - 1
UpperCamelCase :List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _A ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase :Any = [self.cls_token_id]
UpperCamelCase :Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _A ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
UpperCamelCase :Any = [self.sep_token_id]
UpperCamelCase :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _A ( self : List[Any] ):
return len(self.sp_model )
def _A ( self : Any ):
UpperCamelCase :Optional[int] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A ( self : int , __lowerCamelCase : str ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def _A ( self : Dict , __lowerCamelCase : Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase :List[Any] = self.sp_model.PieceToId(__lowerCamelCase )
return spm_id if spm_id else self.unk_token_id
def _A ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCamelCase )
def _A ( self : Optional[int] , __lowerCamelCase : Union[str, Any] ):
UpperCamelCase :List[Any] = []
UpperCamelCase :str = """"""
UpperCamelCase :Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
UpperCamelCase :List[str] = True
UpperCamelCase :Dict = []
else:
current_sub_tokens.append(__lowerCamelCase )
UpperCamelCase :Optional[Any] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __getstate__( self : str ):
UpperCamelCase :Tuple = self.__dict__.copy()
UpperCamelCase :str = None
return state
def __setstate__( self : Tuple , __lowerCamelCase : Optional[int] ):
UpperCamelCase :Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase :Any = {}
UpperCamelCase :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self : str , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase :Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , """wb""" ) as fi:
UpperCamelCase :List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 62
| 1
|
class _UpperCamelCase :
def __init__( self :int ) -> int:
UpperCAmelCase__ = {}
def UpperCAmelCase_ ( self :str ) -> Optional[Any]:
print(self.vertex )
for i in self.vertex:
print(__lowerCamelCase , " -> " , " -> ".join([str(__lowerCamelCase ) for j in self.vertex[i]] ) )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Any , lowerCamelCase :Any ) -> Tuple:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__lowerCamelCase )
else:
# else make a new vertex
UpperCAmelCase__ = [to_vertex]
def UpperCAmelCase_ ( self :Any ) -> Optional[Any]:
# visited array for storing already visited nodes
UpperCAmelCase__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Dict , lowerCamelCase :List[Any] ) -> int:
# mark start vertex as visited
UpperCAmelCase__ = True
print(__lowerCamelCase , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 169
|
from __future__ import annotations
import math
def UpperCamelCase__( UpperCamelCase__ : int )->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase__( UpperCamelCase__ : int )->list[int]:
A__ = str(UpperCamelCase__ )
A__ = [n]
for i in range(1 , len(UpperCamelCase__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def UpperCamelCase__( UpperCamelCase__ : int )->bool:
if len(str(UpperCamelCase__ ) ) > 3:
if not is_prime(int(str(UpperCamelCase__ )[-3:] ) ) or not is_prime(int(str(UpperCamelCase__ )[:3] ) ):
return False
return True
def UpperCamelCase__( UpperCamelCase__ : int = 11 )->list[int]:
A__ = []
A__ = 13
while len(UpperCamelCase__ ) != count:
if validate(UpperCamelCase__ ):
A__ = list_truncated_nums(UpperCamelCase__ )
if all(is_prime(UpperCamelCase__ ) for i in list_nums ):
list_truncated_primes.append(UpperCamelCase__ )
num += 2
return list_truncated_primes
def UpperCamelCase__( )->int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"{sum(compute_truncated_primes(11)) = }")
| 193
| 0
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
lowerCAmelCase_ = 2_9_9_7_9_2_4_5_8
# Symbols
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = symbols('''ct x y z''')
def lowerCamelCase_ ( _UpperCamelCase ) -> float:
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCamelCase_ ( _UpperCamelCase ) -> float:
"""simple docstring"""
return 1 / sqrt(1 - beta(_UpperCamelCase ) ** 2 )
def lowerCamelCase_ ( _UpperCamelCase ) -> np.ndarray:
"""simple docstring"""
return np.array(
[
[gamma(_UpperCamelCase ), -gamma(_UpperCamelCase ) * beta(_UpperCamelCase ), 0, 0],
[-gamma(_UpperCamelCase ) * beta(_UpperCamelCase ), gamma(_UpperCamelCase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = None ) -> np.ndarray:
"""simple docstring"""
if event is None:
snake_case_ : Any = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(_UpperCamelCase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowerCAmelCase_ = transform(2_9_9_7_9_2_4_5)
print('''Example of four vector: ''')
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
lowerCAmelCase_ = {ct: c, x: 1, y: 1, z: 1}
lowerCAmelCase_ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
| 279
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('''KEY''')
lowerCAmelCase_ = TypeVar('''VAL''')
@dataclass(frozen=_a, slots=_a )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowerCamelCase_ : KEY
lowerCamelCase_ : VAL
class __lowerCAmelCase ( _Item ):
def __init__(self ) -> None:
'''simple docstring'''
super().__init__(__magic_name__ , __magic_name__ )
def __bool__(self ) -> bool:
'''simple docstring'''
return False
lowerCAmelCase_ = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__(self , __magic_name__ = 8 , __magic_name__ = 0.75 ) -> None:
'''simple docstring'''
snake_case_ : List[Any] = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : List[str] = capacity_factor
snake_case_ : int = 0
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return hash(__magic_name__ ) % len(self._buckets )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : Optional[Any] = _Item(__magic_name__ , __magic_name__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : List[Any] = _Item(__magic_name__ , __magic_name__ )
return True
else:
return False
def lowerCamelCase (self ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__magic_name__ )
def lowerCamelCase (self ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : int = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCamelCase (self , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : List[str] = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Optional[int] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCamelCase (self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def lowerCamelCase (self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def lowerCamelCase (self , __magic_name__ ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : Dict = self._get_bucket_index(__magic_name__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : Tuple = self._get_next_ind(__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(__magic_name__ ):
if self._try_set(__magic_name__ , __magic_name__ , __magic_name__ ):
break
def __setitem__(self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__magic_name__ , __magic_name__ )
def __delitem__(self , __magic_name__ ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(__magic_name__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
raise KeyError(__magic_name__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : Union[str, Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self , __magic_name__ ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(__magic_name__ ):
snake_case_ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__magic_name__ )
def __len__(self ) -> int:
'''simple docstring'''
return self._len
def __iter__(self ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = ''' ,'''.join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 279
| 1
|
from math import ceil
def __UpperCAmelCase ( __a : int ,__a : Any ) -> Union[str, Any]:
"""simple docstring"""
_a : List[Any] = list(range(0 ,__a ) )
_a : int = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_a : Tuple = []
for i in device_map_blocks:
if device_map_blocks.count(__a ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__a )
# Missing blocks
_a : Union[str, Any] = [i for i in blocks if i not in device_map_blocks]
_a : Dict = [i for i in device_map_blocks if i not in blocks]
if len(__a ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(__a ) )
if len(__a ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(__a ) )
if len(__a ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(__a ) )
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ) -> List[str]:
"""simple docstring"""
_a : Any = list(range(__a ) )
_a : str = int(ceil(n_layers / len(__a ) ) )
_a : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,__a ,__a )]
return dict(zip(__a ,__a ) )
| 235
|
import logging
from transformers import PretrainedConfig
a__ = logging.getLogger(__name__)
a__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "bertabs"
def __init__( self , _a=3_0_5_2_2 , _a=5_1_2 , _a=6 , _a=5_1_2 , _a=8 , _a=5_1_2 , _a=0.2 , _a=6 , _a=7_6_8 , _a=8 , _a=2_0_4_8 , _a=0.2 , **_a , ) -> Any:
super().__init__(**_a )
_a : int = vocab_size
_a : List[str] = max_pos
_a : Tuple = enc_layers
_a : Optional[Any] = enc_hidden_size
_a : int = enc_heads
_a : Optional[Any] = enc_ff_size
_a : List[str] = enc_dropout
_a : Tuple = dec_layers
_a : Optional[Any] = dec_hidden_size
_a : Optional[Any] = dec_heads
_a : Optional[Any] = dec_ff_size
_a : List[Any] = dec_dropout
| 235
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
_UpperCAmelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , """rb""" ) as f:
__lowerCAmelCase : List[str] = Image.open(SCREAMING_SNAKE_CASE )
return im.convert("""RGB""" )
@dataclass
class snake_case_ :
A_ = field(
default=__lowercase ,metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} ,)
A_ = field(
default=__lowercase ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
A_ = field(default=__lowercase ,metadata={'help': 'A folder containing the training data.'} )
A_ = field(default=__lowercase ,metadata={'help': 'A folder containing the validation data.'} )
A_ = field(
default=0.15 ,metadata={'help': 'Percent to split off of train for validation.'} )
A_ = field(
default=__lowercase ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} ,)
A_ = field(
default=__lowercase ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} ,)
def UpperCAmelCase__ ( self : Dict )->Optional[int]:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class snake_case_ :
A_ = field(
default='google/vit-base-patch16-224-in21k' ,metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ,)
A_ = field(
default=__lowercase ,metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__lowercase )} ,)
A_ = field(
default=__lowercase ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ = field(
default=__lowercase ,metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
A_ = field(
default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,)
A_ = field(default=__lowercase ,metadata={'help': 'Name or path of preprocessor config.'} )
A_ = field(
default=__lowercase ,metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} ,)
A_ = field(
default=__lowercase ,metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} ,)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any ) -> Optional[Any]:
__lowerCAmelCase : str = torch.stack([example["""pixel_values"""] for example in examples] )
__lowerCAmelCase : Dict = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowerCAmelCase : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
__lowerCAmelCase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowerCAmelCase : Optional[Any] = {}
if data_args.train_dir is not None:
__lowerCAmelCase : int = os.path.join(data_args.train_dir , """**""" )
if data_args.validation_dir is not None:
__lowerCAmelCase : Tuple = os.path.join(data_args.validation_dir , """**""" )
__lowerCAmelCase : Union[str, Any] = load_dataset(
"""imagefolder""" , data_files=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task="""image-classification""" , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowerCAmelCase : Optional[Any] = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__lowerCAmelCase : Any = dataset["""train"""].train_test_split(data_args.train_val_split )
__lowerCAmelCase : Union[str, Any] = split["""train"""]
__lowerCAmelCase : Optional[int] = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowerCAmelCase : str = dataset["""train"""].features["""labels"""].names
__lowerCAmelCase : Any = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = str(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = label
# Load the accuracy metric from the datasets package
__lowerCAmelCase : Optional[Any] = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE :Optional[Any] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
__lowerCAmelCase : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE ) , labelaid=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : List[Any] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
__lowerCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowerCAmelCase : Tuple = image_processor.size["""shortest_edge"""]
else:
__lowerCAmelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""])
__lowerCAmelCase : Optional[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowerCAmelCase : Dict = Compose(
[
RandomResizedCrop(SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowerCAmelCase : Optional[Any] = Compose(
[
Resize(SCREAMING_SNAKE_CASE ),
CenterCrop(SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
def train_transforms(SCREAMING_SNAKE_CASE :Dict ):
__lowerCAmelCase : Optional[int] = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(SCREAMING_SNAKE_CASE :List[Any] ):
__lowerCAmelCase : Optional[Any] = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
__lowerCAmelCase : Dict = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
__lowerCAmelCase : Optional[Any] = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(SCREAMING_SNAKE_CASE )
# Initalize our trainer
__lowerCAmelCase : Tuple = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowerCAmelCase : List[str] = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase : Optional[int] = last_checkpoint
__lowerCAmelCase : List[str] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowerCAmelCase : Union[str, Any] = trainer.evaluate()
trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE )
trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowerCAmelCase : int = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 350
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class snake_case_ ( __lowercase ):
A_ = 'unispeech-sat'
def __init__( self : str , _snake_case : List[Any]=32 , _snake_case : Union[str, Any]=768 , _snake_case : Tuple=12 , _snake_case : Optional[int]=12 , _snake_case : Optional[Any]=3072 , _snake_case : Tuple="gelu" , _snake_case : int=0.1 , _snake_case : List[Any]=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : str=0.0 , _snake_case : List[str]=0.0 , _snake_case : int=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Optional[Any]=0.02 , _snake_case : int=1E-5 , _snake_case : Dict="group" , _snake_case : Optional[Any]="gelu" , _snake_case : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , _snake_case : int=(5, 2, 2, 2, 2, 2, 2) , _snake_case : int=(10, 3, 3, 3, 3, 2, 2) , _snake_case : Any=False , _snake_case : Optional[Any]=128 , _snake_case : Tuple=16 , _snake_case : str=False , _snake_case : Dict=True , _snake_case : Tuple=0.05 , _snake_case : str=10 , _snake_case : Tuple=2 , _snake_case : List[Any]=0.0 , _snake_case : str=10 , _snake_case : Any=0 , _snake_case : List[Any]=320 , _snake_case : Union[str, Any]=2 , _snake_case : Dict=0.1 , _snake_case : Dict=100 , _snake_case : Union[str, Any]=256 , _snake_case : int=256 , _snake_case : Union[str, Any]=0.1 , _snake_case : Optional[Any]="mean" , _snake_case : int=False , _snake_case : str=False , _snake_case : str=256 , _snake_case : List[Any]=(512, 512, 512, 512, 1500) , _snake_case : Optional[int]=(5, 3, 3, 1, 1) , _snake_case : Tuple=(1, 2, 3, 1, 1) , _snake_case : Dict=512 , _snake_case : Union[str, Any]=0 , _snake_case : List[str]=1 , _snake_case : Optional[Any]=2 , _snake_case : Optional[int]=504 , **_snake_case : Optional[int] , )->Union[str, Any]:
'''simple docstring'''
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case )
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : List[Any] = feat_extract_norm
__lowerCAmelCase : int = feat_extract_activation
__lowerCAmelCase : Union[str, Any] = list(_snake_case )
__lowerCAmelCase : str = list(_snake_case )
__lowerCAmelCase : Optional[Any] = list(_snake_case )
__lowerCAmelCase : Optional[int] = conv_bias
__lowerCAmelCase : Dict = num_conv_pos_embeddings
__lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
__lowerCAmelCase : Tuple = len(self.conv_dim )
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : Optional[int] = hidden_dropout
__lowerCAmelCase : str = attention_dropout
__lowerCAmelCase : int = activation_dropout
__lowerCAmelCase : Union[str, Any] = feat_proj_dropout
__lowerCAmelCase : List[str] = final_dropout
__lowerCAmelCase : Dict = layerdrop
__lowerCAmelCase : Tuple = layer_norm_eps
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Optional[int] = num_clusters
__lowerCAmelCase : List[Any] = do_stable_layer_norm
__lowerCAmelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase : Dict = apply_spec_augment
__lowerCAmelCase : List[Any] = mask_time_prob
__lowerCAmelCase : List[str] = mask_time_length
__lowerCAmelCase : Dict = mask_time_min_masks
__lowerCAmelCase : Tuple = mask_feature_prob
__lowerCAmelCase : List[str] = mask_feature_length
__lowerCAmelCase : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCAmelCase : Optional[int] = num_codevectors_per_group
__lowerCAmelCase : List[Any] = num_codevector_groups
__lowerCAmelCase : int = contrastive_logits_temperature
__lowerCAmelCase : str = feat_quantizer_dropout
__lowerCAmelCase : int = num_negatives
__lowerCAmelCase : str = codevector_dim
__lowerCAmelCase : Any = proj_codevector_dim
__lowerCAmelCase : Any = diversity_loss_weight
# ctc loss
__lowerCAmelCase : Tuple = ctc_loss_reduction
__lowerCAmelCase : Any = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase : Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase : List[str] = list(_snake_case )
__lowerCAmelCase : List[str] = list(_snake_case )
__lowerCAmelCase : Optional[int] = list(_snake_case )
__lowerCAmelCase : Optional[int] = xvector_output_dim
@property
def UpperCAmelCase__ ( self : Optional[Any] )->Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 232
| 0
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A =logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase ) -> Optional[Any]:
lowerCamelCase_ = question_encoder
lowerCamelCase_ = generator
lowerCamelCase_ = self.question_encoder
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[int]:
if os.path.isfile(lowercase ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(lowercase , exist_ok=lowercase )
lowerCamelCase_ = os.path.join(lowercase , "question_encoder_tokenizer" )
lowerCamelCase_ = os.path.join(lowercase , "generator_tokenizer" )
self.question_encoder.save_pretrained(lowercase )
self.generator.save_pretrained(lowercase )
@classmethod
def SCREAMING_SNAKE_CASE_( cls , lowercase , **lowercase ) -> Union[str, Any]:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase_ = kwargs.pop("config" , lowercase )
if config is None:
lowerCamelCase_ = RagConfig.from_pretrained(lowercase )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
lowercase , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
lowercase , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=lowercase , generator=lowercase )
def __call__( self , *lowercase , **lowercase ) -> Dict:
return self.current_tokenizer(*lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> List[Any]:
return self.generator.batch_decode(*lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> List[str]:
return self.generator.decode(*lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.question_encoder
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.generator
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ) -> BatchEncoding:
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , lowercase , )
if max_length is None:
lowerCamelCase_ = self.current_tokenizer.model_max_length
lowerCamelCase_ = self(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase_ = self.current_tokenizer.model_max_length
lowerCamelCase_ = self(
text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , )
lowerCamelCase_ = labels["input_ids"]
return model_inputs
| 19
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "roberta-prelayernorm"
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=5_0265 , __lowerCamelCase : str=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
@property
def lowercase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 314
| 0
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__lowercase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowercase : Any = [0, 25, 50]
__lowercase : int = [25, 50, 75]
__lowercase : List[str] = fuzz.membership.trimf(X, abca)
__lowercase : Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowercase : List[Any] = np.ones(75)
__lowercase : Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__lowercase : str = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__lowercase : Optional[Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowercase : str = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__lowercase : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__lowercase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A = 'pt'
elif is_tf_available():
_A = 'tf'
else:
_A = 'jax'
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = PerceiverTokenizer
UpperCAmelCase__ : Optional[Any] = False
def _a ( self ) -> Dict:
super().setUp()
__UpperCamelCase =PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _a ( self ) -> int:
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def _a ( self , **A_ ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , A_ , A_=False , A_=20 , A_=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__UpperCamelCase =[]
for i in range(len(A_ ) ):
try:
__UpperCamelCase =tokenizer.decode([i] , clean_up_tokenization_spaces=A_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__UpperCamelCase =list(filter(lambda A_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , A_ ) )
__UpperCamelCase =list(filter(lambda A_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=A_ ) , A_ ) )
if max_length is not None and len(A_ ) > max_length:
__UpperCamelCase =toks[:max_length]
if min_length is not None and len(A_ ) < min_length and len(A_ ) > 0:
while len(A_ ) < min_length:
__UpperCamelCase =toks + toks
# toks_str = [t[1] for t in toks]
__UpperCamelCase =[t[0] for t in toks]
# Ensure consistency
__UpperCamelCase =tokenizer.decode(A_ , clean_up_tokenization_spaces=A_ )
if " " not in output_txt and len(A_ ) > 1:
__UpperCamelCase =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=A_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=A_ )
)
if with_prefix_space:
__UpperCamelCase =' ' + output_txt
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
return output_txt, output_ids
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.perceiver_tokenizer
__UpperCamelCase ='Unicode €.'
__UpperCamelCase =tokenizer(A_ )
__UpperCamelCase =[4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , A_ )
# decoding
__UpperCamelCase =tokenizer.decode(A_ )
self.assertEqual(A_ , '[CLS]Unicode €.[SEP]' )
__UpperCamelCase =tokenizer('e è é ê ë' )
__UpperCamelCase =[4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , A_ )
# decoding
__UpperCamelCase =tokenizer.decode(A_ )
self.assertEqual(A_ , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.perceiver_tokenizer
__UpperCamelCase =['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__UpperCamelCase =[4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__UpperCamelCase =tokenizer(A_ , padding=A_ , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
if FRAMEWORK != "jax":
__UpperCamelCase =list(batch.input_ids.numpy()[0] )
else:
__UpperCamelCase =list(batch.input_ids.tolist()[0] )
self.assertListEqual(A_ , A_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.perceiver_tokenizer
__UpperCamelCase =['A long paragraph for summarization.', 'Another paragraph for summarization.']
__UpperCamelCase =tokenizer(A_ , padding=A_ , return_tensors=A_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , A_ )
self.assertIn('attention_mask' , A_ )
self.assertNotIn('decoder_input_ids' , A_ )
self.assertNotIn('decoder_attention_mask' , A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.perceiver_tokenizer
__UpperCamelCase =[
'Summary of the text.',
'Another summary.',
]
__UpperCamelCase =tokenizer(
text_target=A_ , max_length=32 , padding='max_length' , truncation=A_ , return_tensors=A_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def _a ( self ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCamelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCamelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase =tempfile.mkdtemp()
__UpperCamelCase =' He is very happy, UNwant\u00E9d,running'
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
tokenizer.save_pretrained(A_ )
__UpperCamelCase =tokenizer.__class__.from_pretrained(A_ )
__UpperCamelCase =after_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
shutil.rmtree(A_ )
__UpperCamelCase =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase =tempfile.mkdtemp()
__UpperCamelCase =' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__UpperCamelCase =tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
tokenizer.save_pretrained(A_ )
__UpperCamelCase =tokenizer.__class__.from_pretrained(A_ )
__UpperCamelCase =after_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCamelCase =tokenizer.__class__.from_pretrained(A_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(A_ )
def _a ( self ) -> Any:
__UpperCamelCase =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(A_ )
with open(os.path.join(A_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__UpperCamelCase =json.load(A_ )
with open(os.path.join(A_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__UpperCamelCase =json.load(A_ )
__UpperCamelCase =[f'<extra_id_{i}>' for i in range(125 )]
__UpperCamelCase =added_tokens_extra_ids + [
'an_additional_special_token'
]
__UpperCamelCase =added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(A_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(A_ , A_ )
with open(os.path.join(A_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(A_ , A_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCamelCase =tokenizer_class.from_pretrained(
A_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCamelCase =added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=A_ )]
__UpperCamelCase =tokenizer_class.from_pretrained(
A_ , additional_special_tokens=A_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _a ( self ) -> Dict:
__UpperCamelCase =self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def _a ( self ) -> Union[str, Any]:
pass
def _a ( self ) -> Optional[Any]:
pass
def _a ( self ) -> int:
pass
def _a ( self ) -> List[Any]:
pass
def _a ( self ) -> Optional[Any]:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__UpperCamelCase =self.get_tokenizers(fast=A_ , do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase =['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__UpperCamelCase =tokenizer.convert_tokens_to_string(A_ )
self.assertIsInstance(A_ , A_ )
| 62
|
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ):
__UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' )
__UpperCamelCase =soup.find_all('td' , attrs='titleColumn' )
__UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ):
__UpperCamelCase =get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file:
__UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 62
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any]=7 , _SCREAMING_SNAKE_CASE: Optional[int]=3 , _SCREAMING_SNAKE_CASE: Union[str, Any]=18 , _SCREAMING_SNAKE_CASE: Tuple=30 , _SCREAMING_SNAKE_CASE: Optional[Any]=400 , _SCREAMING_SNAKE_CASE: List[Any]=True , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Tuple=None , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: List[Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _SCREAMING_SNAKE_CASE: Dict=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _SCREAMING_SNAKE_CASE: Optional[int]=True , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = size if size is not None else {"height": 224, "width": 224}
UpperCamelCase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean
UpperCamelCase_ = image_std
UpperCamelCase_ = do_convert_rgb
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int]=False , _SCREAMING_SNAKE_CASE: List[Any]=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
UpperCamelCase_ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
UpperCamelCase_ = []
for i in range(self.batch_size ):
UpperCamelCase_ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
UpperCamelCase_ = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
UpperCamelCase_ = [torch.from_numpy(snake_case__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class _UpperCamelCase ( A_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ChineseCLIPImageProcessingTester(self , do_center_crop=snake_case__ )
@property
def lowercase ( self: Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self: Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase ( self: List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase ( self: int ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class _UpperCamelCase ( A_ , unittest.TestCase ):
_UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=snake_case__ )
UpperCamelCase_ = 3
@property
def lowercase ( self: str ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self: Dict ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def lowercase ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase ( self: int ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 360
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'spiece.model'}
_UpperCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
_UpperCAmelCase = {
'google/pegasus-xsum': 5_1_2,
}
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str="<pad>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: int="<mask_2>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask_1>" , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=103 , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> None:
"""simple docstring"""
UpperCamelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_SCREAMING_SNAKE_CASE )}, but is'''
f''' {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_SCREAMING_SNAKE_CASE ) , self.offset - 1 )
]
if len(set(_SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCamelCase_ = additional_special_tokens_extended
else:
UpperCamelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token_sent=_SCREAMING_SNAKE_CASE , offset=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = mask_token_sent
UpperCamelCase_ = vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
# add special tokens to encoder dict
UpperCamelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
@property
def lowercase ( self: Dict ) -> int:
"""simple docstring"""
return len(self.sp_model ) + self.offset
def lowercase ( self: int ) -> Dict[str, int]:
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
return state
def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCamelCase_ = self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
return sp_id + self.offset
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCamelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = []
UpperCamelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
UpperCamelCase_ = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
return 1
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List , _SCREAMING_SNAKE_CASE: Optional[List] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(_SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 328
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[Any] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=_UpperCamelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=_UpperCamelCase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=_UpperCamelCase )
return parser.parse_args()
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = parse_args()
# Import training_script as a module.
snake_case_ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case_ : Tuple = script_fpath.stem
snake_case_ : List[str] = importlib.import_module(_UpperCamelCase )
# Patch sys.argv
snake_case_ : Dict = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 279
|
from __future__ import annotations
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> tuple[float, list[float]]:
"""simple docstring"""
snake_case_ : Dict = list(range(len(_UpperCamelCase ) ) )
snake_case_ : Dict = [v / w for v, w in zip(_UpperCamelCase , _UpperCamelCase )]
index.sort(key=lambda _UpperCamelCase : ratio[i] , reverse=_UpperCamelCase )
snake_case_ : float = 0
snake_case_ : list[float] = [0] * len(_UpperCamelCase )
for i in index:
if weight[i] <= capacity:
snake_case_ : Dict = 1
max_value += value[i]
capacity -= weight[i]
else:
snake_case_ : Union[str, Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = PhobertTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = ["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
_snake_case : Union[str, Any] = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : int = ["""#version: 0.2""", """l à</w>"""]
_snake_case : Optional[Any] = {"""unk_token""": """<unk>"""}
_snake_case : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def UpperCamelCase_ ( self: Optional[int], **a_: int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: str, a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """Tôi là VinAI Research"""
_snake_case : List[str] = """T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
return input_text, output_text
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
_snake_case : Union[str, Any] = """Tôi là VinAI Research"""
_snake_case : Tuple = """T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
_snake_case : Dict = tokenizer.tokenize(a_ )
print(a_ )
self.assertListEqual(a_, a_ )
_snake_case : int = tokens + [tokenizer.unk_token]
_snake_case : Dict = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
| 132
|
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : Union[str, Any] = []
_snake_case : Dict = set({"""(""", """[""", """{"""} )
_snake_case : Union[str, Any] = set({""")""", """]""", """}"""} )
_snake_case : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(snake_case__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(snake_case__ ) == 0 or (len(snake_case__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(snake_case__ ) == 0
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = input("""Enter sequence of brackets: """ )
if is_balanced(snake_case__ ):
print(snake_case__ , """is balanced""" )
else:
print(snake_case__ , """is not balanced""" )
if __name__ == "__main__":
main()
| 132
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case : Tuple = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : int = b.T
__snake_case : Tuple = np.sum(np.square(_lowerCamelCase ) , axis=1 )
__snake_case : str = np.sum(np.square(_lowerCamelCase ) , axis=0 )
__snake_case : int = np.matmul(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = aa[:, None] - 2 * ab + ba[None, :]
return d
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[Any] = x.reshape(-1 , 3 )
__snake_case : str = squared_euclidean_distance(_lowerCamelCase , _lowerCamelCase )
return np.argmin(_lowerCamelCase , axis=1 )
class a (__lowercase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["pixel_values"]
def __init__( self : List[str] , lowerCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCamelCase : bool = True , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase : bool = True , lowerCamelCase : bool = True , **lowerCamelCase : Any , ) -> None:
super().__init__(**lowerCamelCase )
__snake_case : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
__snake_case : str = get_size_dict(lowerCamelCase )
__snake_case : Union[str, Any] = np.array(lowerCamelCase ) if clusters is not None else None
__snake_case : Optional[Any] = do_resize
__snake_case : Tuple = size
__snake_case : Dict = resample
__snake_case : Any = do_normalize
__snake_case : Any = do_color_quantize
def __snake_case ( self : Optional[Any] , lowerCamelCase : np.ndarray , lowerCamelCase : Dict[str, int] , lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : int , ) -> np.ndarray:
__snake_case : List[str] = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
lowerCamelCase , size=(size["height"], size["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : np.ndarray , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
__snake_case : Tuple = rescale(image=lowerCamelCase , scale=1 / 1_27.5 , data_format=lowerCamelCase )
__snake_case : Any = image - 1
return image
def __snake_case ( self : str , lowerCamelCase : ImageInput , lowerCamelCase : bool = None , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : PILImageResampling = None , lowerCamelCase : bool = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCamelCase : int , ) -> PIL.Image.Image:
__snake_case : Tuple = do_resize if do_resize is not None else self.do_resize
__snake_case : int = size if size is not None else self.size
__snake_case : List[Any] = get_size_dict(lowerCamelCase )
__snake_case : Optional[Any] = resample if resample is not None else self.resample
__snake_case : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__snake_case : List[str] = clusters if clusters is not None else self.clusters
__snake_case : List[Any] = np.array(lowerCamelCase )
__snake_case : str = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
__snake_case : str = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__snake_case : Optional[Any] = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_normalize:
__snake_case : Optional[Any] = [self.normalize(image=lowerCamelCase ) for image in images]
if do_color_quantize:
__snake_case : Dict = [to_channel_dimension_format(lowerCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__snake_case : List[str] = np.array(lowerCamelCase )
__snake_case : int = color_quantize(lowerCamelCase , lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__snake_case : Union[str, Any] = images.shape[0]
__snake_case : List[Any] = images.reshape(lowerCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__snake_case : Tuple = list(lowerCamelCase )
else:
__snake_case : Dict = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__snake_case : int = {"input_ids": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 123
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : List[str] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 232
| 0
|
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 218
|
import argparse
import copy
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
with open(_A ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
SCREAMING_SNAKE_CASE__ = []
_list.append([line.split()[1], line.split()[2]] )
SCREAMING_SNAKE_CASE__ = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
SCREAMING_SNAKE_CASE__ = []
_list.append([line.split()[0], line.split()[2]] )
SCREAMING_SNAKE_CASE__ = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
with open(_A ) as f:
SCREAMING_SNAKE_CASE__ = f.read(1 )
SCREAMING_SNAKE_CASE__ = start_node
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = start_node
SCREAMING_SNAKE_CASE__ = 0
while visiting not in first_solution:
SCREAMING_SNAKE_CASE__ = 1_00_00
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(_A ) and k[0] not in first_solution:
SCREAMING_SNAKE_CASE__ = k[1]
SCREAMING_SNAKE_CASE__ = k[0]
first_solution.append(_A )
SCREAMING_SNAKE_CASE__ = distance_of_first_solution + int(_A )
SCREAMING_SNAKE_CASE__ = best_node
first_solution.append(_A )
SCREAMING_SNAKE_CASE__ = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
SCREAMING_SNAKE_CASE__ = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_00_00
)
return first_solution, distance_of_first_solution
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for n in solution[1:-1]:
SCREAMING_SNAKE_CASE__ = solution.index(_A )
for kn in solution[1:-1]:
SCREAMING_SNAKE_CASE__ = solution.index(_A )
if n == kn:
continue
SCREAMING_SNAKE_CASE__ = copy.deepcopy(_A )
SCREAMING_SNAKE_CASE__ = kn
SCREAMING_SNAKE_CASE__ = n
SCREAMING_SNAKE_CASE__ = 0
for k in _tmp[:-1]:
SCREAMING_SNAKE_CASE__ = _tmp[_tmp.index(_A ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
SCREAMING_SNAKE_CASE__ = distance + int(i[1] )
_tmp.append(_A )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
SCREAMING_SNAKE_CASE__ = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda _A : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def UpperCAmelCase_ ( _A , _A , _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = first_solution
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = distance_of_first_solution
SCREAMING_SNAKE_CASE__ = solution
while count <= iters:
SCREAMING_SNAKE_CASE__ = find_neighborhood(_A , _A )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = neighborhood[index_of_best_solution]
SCREAMING_SNAKE_CASE__ = len(_A ) - 1
SCREAMING_SNAKE_CASE__ = False
while not found:
SCREAMING_SNAKE_CASE__ = 0
while i < len(_A ):
if best_solution[i] != solution[i]:
SCREAMING_SNAKE_CASE__ = best_solution[i]
SCREAMING_SNAKE_CASE__ = solution[i]
break
SCREAMING_SNAKE_CASE__ = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = best_solution[:-1]
SCREAMING_SNAKE_CASE__ = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
SCREAMING_SNAKE_CASE__ = cost
SCREAMING_SNAKE_CASE__ = solution
else:
SCREAMING_SNAKE_CASE__ = index_of_best_solution + 1
SCREAMING_SNAKE_CASE__ = neighborhood[index_of_best_solution]
if len(_A ) >= size:
tabu_list.pop(0 )
SCREAMING_SNAKE_CASE__ = count + 1
return best_solution_ever, best_cost
def UpperCAmelCase_ ( _A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = generate_neighbours(args.File )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = generate_first_solution(
args.File , _A )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = tabu_search(
_A , _A , _A , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 218
| 1
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : str ) -> Optional[int]:
_a : List[Any] = 0
_a : Any = [0]
_a : str = [0]
_a : List[Any] = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 0 )
_a : Any = [60]
_a : Union[str, Any] = [10]
_a : List[str] = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 0 )
def _lowercase ( self : Optional[int] ) -> str:
_a : Optional[int] = 3
_a : Any = [1, 2, 3]
_a : Union[str, Any] = [3, 2, 1]
_a : Dict = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 5 )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : str = 50
_a : Optional[int] = [60, 100, 120]
_a : Tuple = [10, 20, 30]
_a : Optional[int] = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 220 )
if __name__ == "__main__":
unittest.main()
| 294
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCamelCase ( nn.Module ):
def __init__( self : Union[str, Any] ) -> int:
super().__init__()
_a : Optional[Any] = nn.Linear(3 , 4 )
_a : Tuple = nn.BatchNormad(4 )
_a : Dict = nn.Linear(4 , 5 )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int:
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
return (args[0] + 1,) + args[1:], kwargs
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
return output + 1
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Dict ) -> str:
_a : List[Any] = ModelForTest()
_a : str = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Dict = ModelForTest()
_a : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Dict ) -> int:
_a : str = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Optional[Any] = test_model(x + 1 )
_a : str = test_model(x + 2 )
_a : Union[str, Any] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def _lowercase ( self : Tuple ) -> int:
_a : Tuple = ModelForTest()
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : Optional[int] = test_model(UpperCAmelCase__ )
_a : int = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : List[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def _lowercase ( self : Dict ) -> Optional[Any]:
_a : Any = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Dict = test_model(UpperCAmelCase__ )
_a : Any = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a : Any = True
_a : Union[str, Any] = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a : Optional[int] = torch.randn(2 , 3 )
_a : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
_a : str = torch.randn(2 , 3 ).to(0 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : int = torch.randn(2 , 3 )
_a : str = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_a : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Tuple = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Tuple ) -> List[str]:
_a : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : List[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : List[str] = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Dict ) -> str:
_a : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Union[str, Any] = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Any = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 294
| 1
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_lowerCAmelCase : int = pd.read_csv(
"""https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
"""position_salaries.csv"""
)
_lowerCAmelCase : Optional[Any] = dataset.iloc[:, 1:2].values
_lowerCAmelCase : Optional[Any] = dataset.iloc[:, 2].values
_lowerCAmelCase : int = train_test_split(X, y, test_size=0.2, random_state=0)
_lowerCAmelCase : str = PolynomialFeatures(degree=4)
_lowerCAmelCase : int = poly_reg.fit_transform(X)
_lowerCAmelCase : Any = LinearRegression()
pol_reg.fit(X_poly, y)
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
plt.scatter(snake_case , snake_case , color="red" )
plt.plot(snake_case , pol_reg.predict(poly_reg.fit_transform(snake_case ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 370
|
"""simple docstring"""
import qiskit
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" )
UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 298
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 10
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = crop_pct
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCamelCase = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCamelCase = int(size['''height'''] / crop_pct )
else:
__UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
else:
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
elif "height" in size and "width" in size:
__UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image:
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 328
| 0
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __a (tf.keras.optimizers.schedules.LearningRateSchedule):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = 1.0 , _a = None , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = initial_learning_rate
SCREAMING_SNAKE_CASE__ : Tuple = warmup_steps
SCREAMING_SNAKE_CASE__ : Optional[Any] = power
SCREAMING_SNAKE_CASE__ : Optional[Any] = decay_schedule_fn
SCREAMING_SNAKE_CASE__ : Any = name
def __call__( self , _a ) -> List[Any]:
"""simple docstring"""
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(_a , tf.floataa )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa )
SCREAMING_SNAKE_CASE__ : str = global_step_float / warmup_steps_float
SCREAMING_SNAKE_CASE__ : Optional[int] = self.initial_learning_rate * tf.math.pow(_a , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_a , )
def _a ( self ) -> List[Any]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 0.9 , __lowerCAmelCase = 0.999 , __lowerCAmelCase = 1E-8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = None , ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCAmelCase , )
if num_warmup_steps:
SCREAMING_SNAKE_CASE__ : Dict = WarmUp(
initial_learning_rate=__lowerCAmelCase , decay_schedule_fn=__lowerCAmelCase , warmup_steps=__lowerCAmelCase , )
if weight_decay_rate > 0.0:
SCREAMING_SNAKE_CASE__ : int = AdamWeightDecay(
learning_rate=__lowerCAmelCase , weight_decay_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=__lowerCAmelCase , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.keras.optimizers.Adam(
learning_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a = 0.001 , _a = 0.9 , _a = 0.999 , _a = 1E-7 , _a = False , _a = 0.0 , _a = None , _a = None , _a = "AdamWeightDecay" , **_a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a , _a , _a , _a , _a , _a , **_a )
SCREAMING_SNAKE_CASE__ : Tuple = weight_decay_rate
SCREAMING_SNAKE_CASE__ : Tuple = include_in_weight_decay
SCREAMING_SNAKE_CASE__ : Dict = exclude_from_weight_decay
@classmethod
def _a ( cls , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = {"""WarmUp""": WarmUp}
return super(_a , cls ).from_config(_a , custom_objects=_a )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
super(_a , self )._prepare_local(_a , _a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def _a ( self , _a , _a=None , **_a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = list(zip(*_a ) )
return super(_a , self ).apply_gradients(zip(_a , _a ) , name=_a , **_a )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
SCREAMING_SNAKE_CASE__ : Dict = apply_state or {}
SCREAMING_SNAKE_CASE__ : List[str] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._fallback_apply_state(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _a ( self , _a , _a , _a=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , _a )
SCREAMING_SNAKE_CASE__ : Any = self._decay_weights_op(_a , _a , _a )
with tf.control_dependencies([decay] ):
return super(_a , self )._resource_apply_dense(_a , _a , **_a )
def _a ( self , _a , _a , _a , _a=None ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self._get_lr(var.device , var.dtype.base_dtype , _a )
SCREAMING_SNAKE_CASE__ : Dict = self._decay_weights_op(_a , _a , _a )
with tf.control_dependencies([decay] ):
return super(_a , self )._resource_apply_sparse(_a , _a , _a , **_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _a ( self , _a ) -> Tuple:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_a , _a ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_a , _a ) is not None:
return False
return True
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[str] = None
@property
def _a ( self ) -> str:
"""simple docstring"""
if self._accum_steps is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=_a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , _a ) -> str:
"""simple docstring"""
if not self._gradients:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_a ) , trainable=_a , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_a ) != len(self._gradients ):
raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(_a )}''' )
for accum_gradient, gradient in zip(self._gradients , _a ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_a )
self._accum_steps.assign_add(1 )
def _a ( self ) -> Any:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_a ) )
| 56
|
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> bool:
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 1
|
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __lowerCAmelCase ) -> list:
if len(__lowerCAmelCase ) == 0:
return []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = min(__lowerCAmelCase ), max(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = int(max_value - min_value ) + 1
SCREAMING_SNAKE_CASE__ : list[list] = [[] for _ in range(__lowerCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(__lowerCAmelCase )
return [v for bucket in buckets for v in sorted(__lowerCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 132
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
SCREAMING_SNAKE_CASE__ : Optional[int] = test_metrics
@require_cpu
def _a ( self ) -> List[Any]:
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _a ( self ) -> List[str]:
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _a ( self ) -> int:
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices.''' )
SCREAMING_SNAKE_CASE__ : List[Any] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
| 132
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = create_tensor(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = gather(SCREAMING_SNAKE_CASE__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [state.process_index]
SCREAMING_SNAKE_CASE__ : int = gather_object(SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == state.num_processes, f'''{gathered_obj}, {len(SCREAMING_SNAKE_CASE__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _a ( SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = broadcast(SCREAMING_SNAKE_CASE__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if state.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
SCREAMING_SNAKE_CASE__ : str = torch.arange(state.num_processes ).to(state.device )
SCREAMING_SNAKE_CASE__ : str = pad_across_processes(SCREAMING_SNAKE_CASE__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : Tuple = create_tensor(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = reduce(SCREAMING_SNAKE_CASE__ , "sum" )
SCREAMING_SNAKE_CASE__ : int = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : str = create_tensor(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = reduce(SCREAMING_SNAKE_CASE__ , "mean" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( SCREAMING_SNAKE_CASE__ : Any ) -> Tuple:
'''simple docstring'''
main()
def _a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = PartialState()
state.print(f'''State: {state}''' )
state.print("testing gather" )
test_gather(SCREAMING_SNAKE_CASE__ )
state.print("testing gather_object" )
test_gather_object(SCREAMING_SNAKE_CASE__ )
state.print("testing broadcast" )
test_broadcast(SCREAMING_SNAKE_CASE__ )
state.print("testing pad_across_processes" )
test_pad_across_processes(SCREAMING_SNAKE_CASE__ )
state.print("testing reduce_sum" )
test_reduce_sum(SCREAMING_SNAKE_CASE__ )
state.print("testing reduce_mean" )
test_reduce_mean(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 191
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : complex , SCREAMING_SNAKE_CASE__ : str = "x" , SCREAMING_SNAKE_CASE__ : float = 10**-10 , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> complex:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = symbols(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = lambdify(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = lambdify(SCREAMING_SNAKE_CASE__ , diff(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Any = starting_point
while True:
if diff_function(SCREAMING_SNAKE_CASE__ ) != 0:
SCREAMING_SNAKE_CASE__ : Any = prev_guess - multiplicity * func(SCREAMING_SNAKE_CASE__ ) / diff_function(
SCREAMING_SNAKE_CASE__ )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE__ : Optional[int] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 191
| 1
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowerCAmelCase : Optional[Any] = Lock()
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Any , _snake_case : List[str] , _snake_case : Tuple , _snake_case : List[str] ):
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_snake_case )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__a =rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__a =min(_snake_case , _snake_case )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_snake_case )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__a =lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__a =max(_snake_case , _snake_case )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_snake_case )
def UpperCamelCase_( _snake_case : List[str] ):
"""simple docstring"""
__a =[]
__a =[]
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__a =Pipe()
__a =Pipe()
process_array_.append(
Process(
target=_snake_case , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
__a =temp_rs
__a =temp_rr
for i in range(1 , len(_snake_case ) - 1 ):
__a =Pipe()
__a =Pipe()
process_array_.append(
Process(
target=_snake_case , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
__a =temp_rs
__a =temp_rr
process_array_.append(
Process(
target=_snake_case , args=(
len(_snake_case ) - 1,
arr[len(_snake_case ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_snake_case ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_snake_case ) ):
__a =result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCamelCase_( ):
"""simple docstring"""
__a =list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*_snake_case )
__a =odd_even_transposition(_snake_case )
print('Sorted List\n' )
print(*_snake_case )
if __name__ == "__main__":
main()
| 218
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
__a =[False] * len(_snake_case )
__a =[-1] * len(_snake_case )
def dfs(_snake_case : Dict , _snake_case : Any ):
__a =True
__a =c
for u in graph[v]:
if not visited[u]:
dfs(_snake_case , 1 - c )
for i in range(len(_snake_case ) ):
if not visited[i]:
dfs(_snake_case , 0 )
for i in range(len(_snake_case ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_lowerCAmelCase : int = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 218
| 1
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowercase ( A_ , A_ )-> Dict:
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a : List[str] = flax_key_tuple[:-1] + ("weight",)
a : str = torch.permute(A_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(A_ ):
# linear layer
a : str = flax_key_tuple[:-1] + ("weight",)
a : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def lowercase ( A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
if "metadata" in layer:
a : Union[str, Any] = layer.split("metadata" )
a : Optional[Any] = "".join(split_layer[0] )[:-1]
a : int = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
a : Dict = layer.split("kvstore" )
a : Tuple = "".join(split_layer[0] )[:-1]
a : List[str] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
a : str = layer.split("/" )
a : Optional[int] = "/".join(split_layer[:-1] )
a : List[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
a : Optional[Any] = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
a : int = "file"
else:
a : Union[str, Any] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowercase ( A_ , A_ )-> Optional[int]:
'''simple docstring'''
a : Dict = rename_keys(A_ )
a : str = {}
for k, v in current_block.items():
a : int = v
a : Any = new_current_block
torch.save(A_ , A_ )
def lowercase ( A_ , A_ , A_ , A_ , A_ = WEIGHTS_NAME )-> Optional[Any]:
'''simple docstring'''
a : Tuple = convert_file_size_to_int(A_ )
a : Union[str, Any] = []
a : Optional[int] = {}
a : List[str] = 0
a : Dict = 0
os.makedirs(A_ , exist_ok=A_ )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
a : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
a : List[Any] = flatten_dict(A_ , sep="/" )
a : Dict = {}
for layer in checkpoint_info.keys():
a , a , a : List[str] = get_key_and_tensorstore_dict(
A_ , A_ , A_ )
if curr_real_layer_name in all_layers:
a : int = content
else:
a : List[str] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a : Optional[int] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a : Any = torch.tensor(A_ )
a : Union[str, Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a , a : Any = rename_base_flax_keys(tuple(key.split("/" ) ) , A_ )
a : Optional[int] = "/".join(A_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a : Union[str, Any] = os.path.join(
A_ , weights_name.replace(".bin" , F'''-{len(A_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(A_ , A_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
a : Union[str, Any] = {}
a : Any = 0
a : Tuple = raw_weights.to(getattr(A_ , A_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a : str = os.path.join(A_ , weights_name.replace(".bin" , F'''-{len(A_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(A_ , A_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(A_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a : int = {}
a : int = {}
for idx, shard in enumerate(A_ ):
a : str = weights_name.replace(
".bin" , F'''-{idx+1:05d}-of-{len(A_ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
a : List[str] = os.path.join(A_ , weights_name.replace(".bin" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(A_ , os.path.join(A_ , A_ ) )
a : Any = shard
for key in shard:
a : Tuple = shard_file
# Add the metadata
a : List[Any] = {"total_size": total_size}
a : List[str] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(A_ , A_ ) , "w" , encoding="utf-8" ) as f:
a : Dict = json.dumps(A_ , indent=2 , sort_keys=A_ ) + "\n"
f.write(A_ )
return metadata, index
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
__lowercase = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowercase ( )-> Optional[Any]:
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a : Dict = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
a : Dict = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
a : List[Any] = TaTokenizer.from_pretrained("t5-small" )
a : List[str] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
a : int = tokenizer(A_ , return_tensors="pt" ).input_ids
a : Optional[Any] = model.generate(A_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 226
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = """gpt_neox"""
def __init__( self : List[str] , __UpperCAmelCase : Tuple=50432 , __UpperCAmelCase : str=6144 , __UpperCAmelCase : Any=44 , __UpperCAmelCase : Union[str, Any]=64 , __UpperCAmelCase : Dict=24576 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : List[Any]=0.25 , __UpperCAmelCase : Optional[Any]=10000 , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[Any]=2048 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Optional[Any]=1e-5 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Any=False , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : int=None , **__UpperCAmelCase : Tuple , ):
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a : List[Any] = vocab_size
a : Optional[int] = max_position_embeddings
a : List[Any] = hidden_size
a : Union[str, Any] = num_hidden_layers
a : int = num_attention_heads
a : Union[str, Any] = intermediate_size
a : Optional[Any] = hidden_act
a : Dict = rotary_pct
a : Any = rotary_emb_base
a : Dict = attention_dropout
a : List[str] = hidden_dropout
a : List[str] = classifier_dropout
a : Any = initializer_range
a : Union[str, Any] = layer_norm_eps
a : int = use_cache
a : int = tie_word_embeddings
a : str = use_parallel_residual
a : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!")
def __snake_case ( self : Any):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase) or len(self.rope_scaling) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'''got {self.rope_scaling}''')
a : str = self.rope_scaling.get("type" , __UpperCAmelCase)
a : List[str] = self.rope_scaling.get("factor" , __UpperCAmelCase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''')
| 226
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A : Tuple =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
lowerCamelCase__ : Tuple = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__ : str = 192
lowerCamelCase__ : Optional[int] = 768
lowerCamelCase__ : Dict = 12
lowerCamelCase__ : List[str] = 3
lowerCamelCase__ : Optional[Any] = [800, 1333]
lowerCamelCase__ : List[Any] = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : Any = 330
lowerCamelCase__ : List[Any] = 14
lowerCamelCase__ : Union[str, Any] = 6
lowerCamelCase__ : Optional[int] = 1320
elif "yolos_s" in yolos_name:
lowerCamelCase__ : Union[str, Any] = 384
lowerCamelCase__ : Union[str, Any] = 1536
lowerCamelCase__ : Union[str, Any] = 12
lowerCamelCase__ : Optional[Any] = 6
elif "yolos_b" in yolos_name:
lowerCamelCase__ : Tuple = [800, 1344]
lowerCamelCase__ : List[Any] = 91
lowerCamelCase__ : int = "huggingface/label-files"
lowerCamelCase__ : List[Any] = "coco-detection-id2label.json"
lowerCamelCase__ : Dict = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
lowerCamelCase__ : Any = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase = False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : int = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ : Dict = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Tuple = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
if "backbone" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
lowerCamelCase__ : Dict = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
lowerCamelCase__ : Tuple = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCamelCase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowerCamelCase__ : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCamelCase__ : Any = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCamelCase__ : Tuple = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCamelCase__ : List[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
lowerCamelCase__ : List[str] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
lowerCamelCase__ : Dict = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
lowerCamelCase__ : Dict = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Any = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
lowerCamelCase__ : Optional[Any] = key.split(""".""" )
lowerCamelCase__ : List[Any] = int(key_split[2] )
lowerCamelCase__ : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__ : Optional[int] = val[:dim, :]
lowerCamelCase__ : Dict = val[
dim : dim * 2, :
]
lowerCamelCase__ : Optional[int] = val[-dim:, :]
else:
lowerCamelCase__ : Optional[int] = val[:dim]
lowerCamelCase__ : Any = val[dim : dim * 2]
lowerCamelCase__ : Union[str, Any] = val[-dim:]
else:
lowerCamelCase__ : Any = val
return orig_state_dict
def SCREAMING_SNAKE_CASE_ () -> Tuple:
lowerCamelCase__ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__ : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = False ) -> Optional[Any]:
lowerCamelCase__ : int = get_yolos_config(snake_case__ )
# load original state_dict
lowerCamelCase__ : int = torch.load(snake_case__ , map_location="""cpu""" )["model"]
# load 🤗 model
lowerCamelCase__ : Tuple = YolosForObjectDetection(snake_case__ )
model.eval()
lowerCamelCase__ : Optional[int] = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__ : Tuple = 800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__ : str = YolosImageProcessor(format="""coco_detection""" , size=snake_case__ )
lowerCamelCase__ : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCamelCase__ : List[Any] = model(**snake_case__ )
lowerCamelCase__ : List[Any] = outputs.logits, outputs.pred_boxes
lowerCamelCase__ : Optional[int] = None, None
if yolos_name == "yolos_ti":
lowerCamelCase__ : List[str] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
lowerCamelCase__ : Optional[int] = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__ : int = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
lowerCamelCase__ : Any = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
lowerCamelCase__ : Dict = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
lowerCamelCase__ : Dict = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
lowerCamelCase__ : Tuple = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1E-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
lowerCamelCase__ : Union[str, Any] = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("""Pushing to the hub...""" )
lowerCamelCase__ : Dict = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case__ , organization="""hustvl""" )
model.push_to_hub(snake_case__ , organization="""hustvl""" )
if __name__ == "__main__":
_A : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A : Dict =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 41
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Optional[Any] = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ):
__UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
return json.load(snake_case__ )
raise ValueError(F"can't find {path}" )
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
| 0
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
if number != int(__lowercase ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
A: int = [-1] * (number + 1)
A: List[str] = 0
for i in range(1 , number + 1 ):
A: Tuple = sys.maxsize
A: Optional[int] = int(math.sqrt(__lowercase ) )
for j in range(1 , root + 1 ):
A: str = 1 + answers[i - (j**2)]
A: Dict = min(__lowercase , __lowercase )
A: str = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger()
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : nn.Module
UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ )
UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ )
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> int:
'''simple docstring'''
A: List[str] = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(SCREAMING_SNAKE_CASE_ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(SCREAMING_SNAKE_CASE_ )
[x.remove() for x in self.handles]
return self
@property
def _snake_case ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : nn.Module
UpperCamelCase_ : nn.Module
UpperCamelCase_ : int = 0
UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ )
UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ )
def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]:
'''simple docstring'''
A: Dict = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized
A: Tuple = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized
A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) )
A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while"""
f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}.""" )
for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase = True ) -> Any:
print(F"""Converting {name}...""" )
with torch.no_grad():
A: Union[str, Any] = timm.create_model(__lowercase , pretrained=__lowercase ).eval()
A: List[str] = ResNetForImageClassification(__lowercase ).eval()
A: int = ModuleTransfer(src=__lowercase , dest=__lowercase )
A: List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(__lowercase )
assert torch.allclose(from_model(__lowercase ) , our_model(__lowercase ).logits ), "The model logits don't match the original one."
A: str = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(__lowercase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__lowercase , )
# we can use the convnext one
A: Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , )
print(F"""Pushed {checkpoint_name}""" )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> List[Any]:
A: Union[str, Any] = '''imagenet-1k-id2label.json'''
A: Union[str, Any] = 1_0_0_0
A: Optional[int] = (1, num_labels)
A: Dict = '''huggingface/label-files'''
A: Any = num_labels
A: Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
A: Optional[int] = {int(__lowercase ): v for k, v in idalabel.items()}
A: Optional[int] = idalabel
A: List[str] = {v: k for k, v in idalabel.items()}
A: str = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase )
A: Optional[Any] = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__lowercase , names_to_config[model_name] , __lowercase , __lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__lowercase , __lowercase , __lowercase , __lowercase )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 334
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a ( unittest.TestCase ):
def A_ ( self : Tuple ):
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
snake_case_ = {'''unk_token''': '''<unk>'''}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase_ ) )
snake_case_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
snake_case_ = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowercase_ , lowercase_ )
def A_ ( self : Tuple , **lowercase_ : Tuple ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowercase_ )
def A_ ( self : int , **lowercase_ : int ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowercase_ )
def A_ ( self : Dict , **lowercase_ : Optional[Any] ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def A_ ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def A_ ( self : Tuple ):
snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
snake_case_ = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : Dict ):
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def A_ ( self : Optional[Any] ):
snake_case_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
snake_case_ = self.get_image_processor(do_normalize=lowercase_ )
snake_case_ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def A_ ( self : str ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowercase_ , return_tensors='''np''' )
snake_case_ = processor(images=lowercase_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = '''lower newer'''
snake_case_ = processor(text=lowercase_ , return_tensors='''np''' )
snake_case_ = tokenizer(lowercase_ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def A_ ( self : Any ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = '''lower newer'''
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : int ):
snake_case_ = '''google/owlvit-base-patch32'''
snake_case_ = OwlViTProcessor.from_pretrained(lowercase_ )
snake_case_ = ['''cat''', '''nasa badge''']
snake_case_ = processor(text=lowercase_ )
snake_case_ = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : str ):
snake_case_ = '''google/owlvit-base-patch32'''
snake_case_ = OwlViTProcessor.from_pretrained(lowercase_ )
snake_case_ = [['''cat''', '''nasa badge'''], ['''person''']]
snake_case_ = processor(text=lowercase_ )
snake_case_ = 16
snake_case_ = len(lowercase_ )
snake_case_ = max([len(lowercase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : Union[str, Any] ):
snake_case_ = '''google/owlvit-base-patch32'''
snake_case_ = OwlViTProcessor.from_pretrained(lowercase_ )
snake_case_ = ['''cat''', '''nasa badge''']
snake_case_ = processor(text=lowercase_ )
snake_case_ = 16
snake_case_ = inputs['''input_ids''']
snake_case_ = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def A_ ( self : List[str] ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = self.prepare_image_inputs()
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(images=lowercase_ , query_images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : Any ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowercase_ )
snake_case_ = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
| 56
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a : int = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
config.addinivalue_line(
'''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
snake_case_ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__UpperCAmelCase, id=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if exitstatus == 5:
snake_case_ = 0
# Doctest custom flag to ignore output.
a : Union[str, Any] = doctest.register_optionflag('IGNORE_RESULT')
a : Optional[int] = doctest.OutputChecker
class a ( _lowerCamelCase ):
def A_ ( self : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase_ , lowercase_ , lowercase_ )
a : List[Any] = CustomOutputChecker
a : Optional[int] = HfDoctestModule
a : Tuple = HfDocTestParser
| 56
| 1
|
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(__SCREAMING_SNAKE_CASE ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 264
|
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__SCREAMING_SNAKE_CASE ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("doctest").testmod()
| 264
| 1
|
"""simple docstring"""
from math import isqrt, loga
def __lowerCamelCase ( a_ : int ) -> list[int]:
__SCREAMING_SNAKE_CASE :str = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , a_ , a_ ):
__SCREAMING_SNAKE_CASE :List[str] = False
return [i for i in range(2 , a_ ) if is_prime[i]]
def __lowerCamelCase ( a_ : int = 80_08_00 , a_ : int = 80_08_00 ) -> int:
__SCREAMING_SNAKE_CASE :Tuple = degree * loga(a_ )
__SCREAMING_SNAKE_CASE :Dict = int(a_ )
__SCREAMING_SNAKE_CASE :str = calculate_prime_numbers(a_ )
__SCREAMING_SNAKE_CASE :Optional[int] = 0
__SCREAMING_SNAKE_CASE :str = 0
__SCREAMING_SNAKE_CASE :Union[str, Any] = len(a_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'{solution() = }')
| 191
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Tuple , a_ : str=None ) -> Union[str, Any]:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__SCREAMING_SNAKE_CASE :Dict = nn.Parameter(a_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__SCREAMING_SNAKE_CASE :Optional[int] = nn.Parameter(a_ )
def __lowerCamelCase ( a_ : Dict , a_ : str , a_ : Optional[int] ) -> Any:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : List[Any] , a_ : Dict , a_ : List[str] ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Any = np.asarray(weights[2] )
__SCREAMING_SNAKE_CASE :Dict = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : Any , a_ : List[str] , a_ : Optional[int] ) -> Union[str, Any]:
# layernorm 1
__SCREAMING_SNAKE_CASE :Any = weights[0][0][0]
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# lsh weights + output
__SCREAMING_SNAKE_CASE :List[Any] = weights[0][1]
if len(a_ ) < 4:
set_layer_weights_in_torch_lsh(a_ , torch_block.attention , a_ )
else:
set_layer_weights_in_torch_local(a_ , torch_block.attention , a_ )
# intermediate weighs
__SCREAMING_SNAKE_CASE :List[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(a_ ) == 4:
__SCREAMING_SNAKE_CASE :List[str] = intermediate_weights[2]
# layernorm 2
__SCREAMING_SNAKE_CASE :Tuple = np.asarray(intermediate_weights[0][0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# intermediate dense
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][0] )
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
# intermediate out
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : List[str] , a_ : str , a_ : List[Any] ) -> Optional[Any]:
# reformer model
__SCREAMING_SNAKE_CASE :Dict = torch_model.reformer
# word embeds
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(a_ ) , )
if isinstance(weights[3] , a_ ):
__SCREAMING_SNAKE_CASE :List[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__SCREAMING_SNAKE_CASE :List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
__SCREAMING_SNAKE_CASE :str = nn.Parameter(torch.tensor(a_ ) )
__SCREAMING_SNAKE_CASE :Optional[int] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
a_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__SCREAMING_SNAKE_CASE :Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(a_ , a_ , a_ )
# output layer norm
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[7][0] )
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# output embeddings
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[9][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : Any , a_ : Dict , a_ : Dict ) -> Tuple:
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE :List[str] = ReformerConfig.from_json_file(a_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE :List[Any] = ReformerModelWithLMHead(a_ )
with open(a_ , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE :Any = pickle.load(a_ )['''weights''']
set_model_weights_in_torch(a_ , a_ , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase_ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 191
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
__lowerCamelCase = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
__lowerCamelCase = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class A__ ( _snake_case ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = BertTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__="[UNK]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[PAD]" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCamelCase__ ) != tokenize_chinese_chars
):
A_ = getattr(UpperCamelCase__ , normalizer_state.pop("""type""" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCamelCase__ )
A_ = do_lower_case
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> str:
'''simple docstring'''
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
A_ = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
| 367
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if not postfix_notation:
return 0
A_ = {"""+""", """-""", """*""", """/"""}
A_ = []
for token in postfix_notation:
if token in operations:
A_ , A_ = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCAmelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
| 0
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def a ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=None ):
'''simple docstring'''
require_version(deps[pkg] , _UpperCAmelCase )
| 226
|
from __future__ import annotations
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , a_ : Optional[int]=None ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = data
__UpperCAmelCase : Any = None
def __repr__( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Dict = []
__UpperCAmelCase : Any = self
while temp:
string_rep.append(F'{temp.data}' )
__UpperCAmelCase : Dict = temp.next
return "->".join(a_ )
def a ( _UpperCAmelCase : list ):
'''simple docstring'''
if not elements_list:
raise Exception('''The Elements List is empty''' )
__UpperCAmelCase : int = Node(elements_list[0] )
for i in range(1 , len(_UpperCAmelCase ) ):
__UpperCAmelCase : Any = Node(elements_list[i] )
__UpperCAmelCase : Optional[int] = current.next
return head
def a ( _UpperCAmelCase : Node ):
'''simple docstring'''
if head_node is not None and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
print_reverse(head_node.next )
print(head_node.data )
def a ( ):
'''simple docstring'''
from doctest import testmod
testmod()
__UpperCAmelCase : Tuple = make_linked_list([14, 52, 14, 12, 43] )
print('''Linked List:''' )
print(_UpperCAmelCase )
print('''Elements in Reverse:''' )
print_reverse(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 226
| 1
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
A__ = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
A__ = f"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase__ )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict:
"""simple docstring"""
A__ = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
A__ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A__ = [
get_openlibrary_data(author['''key'''] )["""name"""] for author in data["""Authors"""]
]
A__ = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowercase__ , lowercase__ ):
A__ = """, """.join(lowercase__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowerCamelCase : int = input("""\nEnter the ISBN code to search (or \'quit\' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
_lowerCamelCase : Tuple = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("""\n""".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 351
|
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0) ->None:
'''simple docstring'''
A__ , A__ = row, column
A__ = [[default_value for c in range(UpperCAmelCase__)] for r in range(UpperCAmelCase__)]
def __str__( self : List[str]) ->str:
'''simple docstring'''
A__ = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
A__ = 0
for row_vector in self.array:
for obj in row_vector:
A__ = max(UpperCAmelCase__ , len(str(UpperCAmelCase__)))
A__ = f"""%{max_element_length}s"""
# Make string and return
def single_line(UpperCAmelCase__ : list[float]) -> str:
nonlocal string_format_identifier
A__ = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__) for row_vector in self.array)
return s
def __repr__( self : Tuple) ->str:
'''simple docstring'''
return str(self)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : tuple[int, int]) ->bool:
'''simple docstring'''
if not (isinstance(UpperCAmelCase__ , (list, tuple)) and len(UpperCAmelCase__) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : List[Any] , UpperCAmelCase__ : tuple[int, int]) ->Any:
'''simple docstring'''
assert self.validate_indicies(UpperCAmelCase__)
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[Any] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float) ->None:
'''simple docstring'''
assert self.validate_indicies(UpperCAmelCase__)
A__ = value
def __add__( self : Optional[int] , UpperCAmelCase__ : Matrix) ->Matrix:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__)
assert self.row == another.row and self.column == another.column
# Add
A__ = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
A__ = self[r, c] + another[r, c]
return result
def __neg__( self : str) ->Matrix:
'''simple docstring'''
A__ = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
A__ = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix) ->Matrix:
'''simple docstring'''
return self + (-another)
def __mul__( self : Union[str, Any] , UpperCAmelCase__ : int | float | Matrix) ->Matrix:
'''simple docstring'''
if isinstance(UpperCAmelCase__ , (int, float)): # Scalar multiplication
A__ = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
A__ = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # Matrix multiplication
assert self.column == another.row
A__ = Matrix(self.row , another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
A__ = f"""Unsupported type given for another ({type(UpperCAmelCase__)})"""
raise TypeError(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Matrix:
'''simple docstring'''
A__ = Matrix(self.column , self.row)
for r in range(self.row):
for c in range(self.column):
A__ = self[r, c]
return result
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix) ->Any:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
A__ = v.transpose()
A__ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
A__ = Matrix(3 , 3 , 0 )
for i in range(3 ):
A__ = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
A__ = Matrix(3 , 1 , 0 )
A__ , A__ , A__ = 1, 2, -3
A__ = Matrix(3 , 1 , 0 )
A__ , A__ , A__ = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase_ , lowercase_ )}""" )
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 231
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a: str = logging.get_logger(__name__)
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Union[str, Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase__ : Tuple = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
lowercase__ : str = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase__ : Dict = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase__ : Dict = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Optional[int] = dct.pop(lowerCAmelCase_ )
lowercase__ : Any = val
def __UpperCamelCase ( UpperCAmelCase ):
if "handwritten" in checkpoint_url:
lowercase__ : Optional[int] = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase__ : List[Any] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
lowercase__ : Optional[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=lowerCAmelCase_ )
lowercase__ : Optional[int] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase__ : Dict = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : Optional[int] = 24
lowercase__ : Optional[Any] = 16
lowercase__ : Optional[int] = 1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase__ : Tuple = False
lowercase__ : str = '''relu'''
lowercase__ : Any = 1024
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
# load HuggingFace model
lowercase__ : str = ViTModel(lowerCAmelCase_ , add_pooling_layer=lowerCAmelCase_ )
lowercase__ : int = TrOCRForCausalLM(lowerCAmelCase_ )
lowercase__ : Optional[int] = VisionEncoderDecoderModel(encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
model.eval()
# load state_dict of original model, rename some keys
lowercase__ : int = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' , check_hash=lowerCAmelCase_ )['''model''']
lowercase__ : Tuple = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase__ : Dict = state_dict.pop(lowerCAmelCase_ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
lowercase__ : str = val
else:
lowercase__ : Union[str, Any] = val
# load state dict
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
lowercase__ : Tuple = ViTImageProcessor(size=encoder_config.image_size )
lowercase__ : int = RobertaTokenizer.from_pretrained('''roberta-large''' )
lowercase__ : Tuple = TrOCRProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ : List[Any] = processor(images=prepare_img(lowerCAmelCase_ ) , return_tensors='''pt''' ).pixel_values
# verify logits
lowercase__ : int = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase__ : Any = model(pixel_values=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ )
lowercase__ : List[str] = outputs.logits
lowercase__ : List[Any] = torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase__ : Tuple = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase__ : List[Any] = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
lowercase__ : Dict = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
lowercase__ : int = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCAmelCase_ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__a: List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__a: Dict = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 198
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =len([g for position, g in enumerate(lowerCAmelCase_ ) if g == main_target[position]] )
return (item, float(lowerCAmelCase_ ))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =random.randint(0, len(lowerCAmelCase_ ) - 1 )
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =list(lowerCAmelCase_ )
if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE =random.choice(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =[]
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE =int(parent_a[1] * 100 ) + 1
SCREAMING_SNAKE_CASE =10 if child_n >= 10 else child_n
for _ in range(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE =population_score[random.randint(0, lowerCAmelCase_ )][0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =crossover(parent_a[0], lowerCAmelCase_ )
# Append new string to the population list.
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
return pop
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE =F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(lowerCAmelCase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE =F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(lowerCAmelCase_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE =[]
for _ in range(lowerCAmelCase_ ):
population.append(''.join([random.choice(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) )] ) )
# Just some logs to know what the algorithms is doing.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCAmelCase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE =[evaluate(lowerCAmelCase_, lowerCAmelCase_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE =sorted(lowerCAmelCase_, key=lambda lowerCAmelCase_ : x[1], reverse=lowerCAmelCase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCAmelCase_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE =[
(item, score / len(lowerCAmelCase_ )) for item, score in population_score
]
# This is selection
for i in range(lowerCAmelCase_ ):
population.extend(select(population_score[int(lowerCAmelCase_ )], lowerCAmelCase_, lowerCAmelCase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(lowerCAmelCase_ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowerCamelCase =(
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_lowerCamelCase =list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase =basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 334
| 0
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args_into_dataclasses()[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = TensorFlowBenchmark(args=__lowerCAmelCase )
try:
SCREAMING_SNAKE_CASE__ : str = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
SCREAMING_SNAKE_CASE__ : List[str] = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
SCREAMING_SNAKE_CASE__ : Any = """ """.join(str(__lowerCAmelCase ).split(""" """ )[:-1] )
SCREAMING_SNAKE_CASE__ : int = """"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = eval(str(__lowerCAmelCase ).split(""" """ )[-1] )
SCREAMING_SNAKE_CASE__ : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Dict = full_error_msg + begin_error_msg + str(__lowerCAmelCase )
raise ValueError(__lowerCAmelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 56
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __a (metaclass=UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = ["""transformers""", """torch""", """note_seq"""]
def __init__( self , *_a , **_a ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> int:
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 56
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Any = ["""image_processor""", """tokenizer"""]
_lowerCAmelCase : Optional[Any] = """ViTImageProcessor"""
_lowerCAmelCase : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Optional[int] , lowercase_ : Any=None , lowercase_ : str=None , **lowercase_ : Tuple ):
snake_case_ : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase_ , )
snake_case_ : Tuple = kwargs.pop('''feature_extractor''' )
snake_case_ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : Tuple , lowercase_ : Dict=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[str]=None , lowercase_ : str=None , **lowercase_ : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
snake_case_ : Tuple = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if visual_prompt is not None:
snake_case_ : Tuple = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if visual_prompt is not None and images is not None:
snake_case_ : str = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
snake_case_ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
snake_case_ : Optional[int] = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def _snake_case ( self : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _snake_case ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : Optional[Any] ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def _snake_case ( self : Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
return self.image_processor_class
@property
def _snake_case ( self : Optional[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase_ , )
return self.image_processor
| 264
|
"""simple docstring"""
import sys
lowercase__ : Dict = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __lowercase ( _a ):
snake_case_ : List[Any] = 1
for digit in s:
product *= int(_a )
return product
def __lowercase ( _a = N ):
snake_case_ : Optional[int] = -sys.maxsize - 1
snake_case_ : str = n[:13]
snake_case_ : List[Any] = 13
while cur_index < len(_a ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
snake_case_ : int = substr[1:] + n[cur_index]
cur_index += 1
else:
snake_case_ : Optional[Any] = max(_a , str_eval(_a ) )
snake_case_ : Any = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
| 264
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """levit"""
def __init__( self : List[str] , lowercase_ : Optional[Any]=224 , lowercase_ : int=3 , lowercase_ : str=3 , lowercase_ : Optional[int]=2 , lowercase_ : Any=1 , lowercase_ : Optional[Any]=16 , lowercase_ : Tuple=[128, 256, 384] , lowercase_ : Any=[4, 8, 12] , lowercase_ : Any=[4, 4, 4] , lowercase_ : List[Any]=[16, 16, 16] , lowercase_ : Any=0 , lowercase_ : int=[2, 2, 2] , lowercase_ : Tuple=[2, 2, 2] , lowercase_ : Dict=0.02 , **lowercase_ : Any , ) -> Dict:
super().__init__(**lowercase_ )
UpperCAmelCase : Dict = image_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : List[str] = kernel_size
UpperCAmelCase : Any = stride
UpperCAmelCase : str = padding
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : List[str] = depths
UpperCAmelCase : str = key_dim
UpperCAmelCase : str = drop_path_rate
UpperCAmelCase : List[Any] = patch_size
UpperCAmelCase : Union[str, Any] = attention_ratio
UpperCAmelCase : Any = mlp_ratio
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : List[Any] = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = version.parse("""1.11""" )
@property
def UpperCAmelCase_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> float:
return 1E-4
| 368
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : str , lowercase_ : Union[str, Any]=7 , lowercase_ : Union[str, Any]=3 , lowercase_ : int=30 , lowercase_ : Tuple=400 , lowercase_ : Tuple=True , lowercase_ : Optional[int]=None , lowercase_ : List[str]=0.9 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=True , lowercase_ : int=[0.5, 0.5, 0.5] , lowercase_ : List[str]=[0.5, 0.5, 0.5] , ) -> Tuple:
UpperCAmelCase : Optional[int] = size if size is not None else {'shortest_edge': 30}
UpperCAmelCase : int = crop_size if crop_size is not None else {'height': 30, 'width': 30}
UpperCAmelCase : Tuple = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : int = min_resolution
UpperCAmelCase : Optional[int] = max_resolution
UpperCAmelCase : str = do_resize_and_center_crop
UpperCAmelCase : int = size
UpperCAmelCase : Dict = crop_pct
UpperCAmelCase : Union[str, Any] = crop_size
UpperCAmelCase : Optional[int] = do_normalize
UpperCAmelCase : Optional[Any] = image_mean
UpperCAmelCase : Optional[Any] = image_std
def UpperCAmelCase_ ( self : str ) -> int:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase : Any = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Tuple ) -> str:
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
self.assertTrue(hasattr(lowercase_ , 'crop_pct' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
# Initialize image_processing
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : str = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
# Initialize image_processing
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : Optional[Any] = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self : str ) -> Dict:
# Initialize image_processing
UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 280
| 0
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase__ : str = "#"
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE__ : int = {}
SCREAMING_SNAKE_CASE__ : Dict = trie[char]
SCREAMING_SNAKE_CASE__ : Tuple = True
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE__ : str = trie[char]
else:
return []
return self._elements(A__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for c, v in d.items():
SCREAMING_SNAKE_CASE__ : Dict = [""" """] if c == END else [(c + s) for s in self._elements(A__ )]
result.extend(A__ )
return tuple(A__ )
UpperCAmelCase__ : Union[str, Any] = Trie()
UpperCAmelCase__ : int = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = trie.find_word(lowerCAmelCase__ )
return tuple(string + word for word in suffixes )
def lowercase_ ( ):
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 25
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
lowercase = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
lowercase = model.state_dict()
def to_tf_var_name(lowerCAmelCase__ ):
for patt, repl in iter(lowerCAmelCase__ ):
lowercase = name.replace(lowerCAmelCase__ , lowerCAmelCase__ )
return f'bert/{name}'
def create_tf_var(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = tf.dtypes.as_dtype(tensor.dtype )
lowercase = tf.get_variable(dtype=lowerCAmelCase__ , shape=tensor.shape , name=lowerCAmelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCAmelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
lowercase = to_tf_var_name(lowerCAmelCase__ )
lowercase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
lowercase = torch_tensor.T
lowercase = create_tf_var(tensor=lowerCAmelCase__ , name=lowerCAmelCase__ , session=lowerCAmelCase__ )
tf.keras.backend.set_value(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = session.run(lowerCAmelCase__ )
print(f'Successfully created {tf_name}: {np.allclose(lowerCAmelCase__ , lowerCAmelCase__ )}' )
lowercase = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def UpperCamelCase ( lowerCAmelCase__=None ):
'''simple docstring'''
lowercase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory in which to save tensorflow model''' )
lowercase = parser.parse_args(lowerCAmelCase__ )
lowercase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 101
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LEDTokenizer
__UpperCamelCase = LEDTokenizerFast
__UpperCamelCase = True
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any:
super().setUp()
UpperCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase = {'unk_token': '<unk>'}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase_ ) )
def UpperCAmelCase__ ( self :List[Any] , **lowercase_ :List[Any] ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] , **lowercase_ :str ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :str ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase__ ( self :Tuple ) -> List[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def UpperCAmelCase__ ( self :int ) -> Optional[int]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def UpperCAmelCase__ ( self :Tuple ) -> Optional[int]:
UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
UpperCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors='pt' )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_ )
@require_torch
def UpperCAmelCase__ ( self :str ) -> List[str]:
UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowercase_ , padding=lowercase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowercase_ )
self.assertIn('attention_mask' , lowercase_ )
self.assertNotIn('labels' , lowercase_ )
self.assertNotIn('decoder_attention_mask' , lowercase_ )
@require_torch
def UpperCAmelCase__ ( self :Tuple ) -> List[str]:
UpperCAmelCase = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowercase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def UpperCAmelCase__ ( self :Optional[Any] ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
['I am a small frog' * 10_24, 'I am a small frog'] , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def UpperCAmelCase__ ( self :List[Any] ) -> Tuple:
UpperCAmelCase = ['A long paragraph for summarization.']
UpperCAmelCase = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowercase_ , return_tensors='pt' )
UpperCAmelCase = tokenizer(text_target=lowercase_ , return_tensors='pt' )
UpperCAmelCase = inputs['input_ids']
UpperCAmelCase = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCAmelCase__ ( self :Optional[int] ) -> Any:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ['Summary of the text.', 'Another summary.']
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowercase_ , padding=lowercase_ )
UpperCAmelCase = [[0] * len(lowercase_ ) for x in encoded_output['input_ids']]
UpperCAmelCase = tokenizer.pad(lowercase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> Any:
pass
def UpperCAmelCase__ ( self :Optional[int] ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase = 'A, <mask> AllenNLP sentence.'
UpperCAmelCase = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
UpperCAmelCase = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 351
|
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ ):
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowercase_ )
if number < 1:
UpperCAmelCase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowercase_ )
UpperCAmelCase = 1
for i in range(1 , lowercase_ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181
| 0
|
from __future__ import annotations
import math
def lowerCamelCase__ ( __lowerCAmelCase : int ):
"""simple docstring"""
if num <= 0:
lowerCAmelCase_ = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(__lowerCAmelCase )
lowerCAmelCase_ = [True] * (num + 1)
lowerCAmelCase_ = []
lowerCAmelCase_ = 2
lowerCAmelCase_ = int(math.sqrt(__lowerCAmelCase ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__lowerCAmelCase )
# Set multiples of start be False
for i in range(start * start , num + 1 , __lowerCAmelCase ):
if sieve[i] is True:
lowerCAmelCase_ = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(__lowerCAmelCase )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 231
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_A = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231
| 1
|
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
__lowerCAmelCase: List[Any] = []
__lowerCAmelCase: int = set({"(", "[", "{"} )
__lowerCAmelCase: Optional[Any] = set({")", "]", "}"} )
__lowerCAmelCase: Optional[int] = {"{": "}", "[": "]", "(": ")"}
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(__SCREAMING_SNAKE_CASE ) == 0 or (len(__SCREAMING_SNAKE_CASE ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__SCREAMING_SNAKE_CASE ) == 0
def a__ ( ) -> List[str]:
__lowerCAmelCase: str = input("Enter sequence of brackets: " )
if is_balanced(__SCREAMING_SNAKE_CASE ):
print(__SCREAMING_SNAKE_CASE , "is balanced" )
else:
print(__SCREAMING_SNAKE_CASE , "is not balanced" )
if __name__ == "__main__":
main()
| 108
|
"""simple docstring"""
from math import ceil
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
__lowerCAmelCase: Tuple = list(range(0 , __SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Optional[Any] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__lowerCAmelCase: List[Any] = []
for i in device_map_blocks:
if device_map_blocks.count(__SCREAMING_SNAKE_CASE ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__SCREAMING_SNAKE_CASE )
# Missing blocks
__lowerCAmelCase: Optional[Any] = [i for i in blocks if i not in device_map_blocks]
__lowerCAmelCase: List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(__SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(__SCREAMING_SNAKE_CASE ) )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
__lowerCAmelCase: List[Any] = list(range(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Dict = int(ceil(n_layers / len(__SCREAMING_SNAKE_CASE ) ) )
__lowerCAmelCase: Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )]
return dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
| 108
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a : Optional[Any] = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ['PoolFormerFeatureExtractor']
a : List[Any] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 56
| 1
|
import math
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = 0
lowerCamelCase_ = 0
while num > 0:
lowerCamelCase_ = num % 8
lowerCamelCase_ = octal + (remainder * math.floor(math.pow(10 , lowercase ) ))
counter += 1
lowerCamelCase_ = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"""0o{int(lowercase )}"""
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 208
|
from ..utils import DummyObject, requires_backends
class A( metaclass=UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self : Any , *A_ : Any , **A_ : Tuple ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def a__ ( cls : Tuple , *A_ : Dict , **A_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def a__ ( cls : int , *A_ : Dict , **A_ : List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 208
| 1
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowercase__ = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
lowercase__ = dataset.iloc[:, 1:2].values
lowercase__ = dataset.iloc[:, 2].values
lowercase__ = train_test_split(X, y, test_size=0.2, random_state=0)
lowercase__ = PolynomialFeatures(degree=4)
lowercase__ = poly_reg.fit_transform(X)
lowercase__ = LinearRegression()
pol_reg.fit(X_poly, y)
def __a ( ) ->Tuple:
plt.scatter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='red' )
plt.plot(_SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(_SCREAMING_SNAKE_CASE ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 290
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : List[Any] = tempfile.mkdtemp()
# fmt: off
__A : List[str] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Union[str, Any] = dict(zip(_A , range(len(_A ) ) ) )
__A : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : int = {'unk_token': '<unk>'}
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : Optional[int] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.get_tokenizer()
__A : str = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : Optional[int] = self.get_image_processor(do_normalize=_A )
__A : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = self.prepare_image_inputs()
__A : int = image_processor(_A , return_tensors='np' )
__A : str = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : str = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : str = 'lower newer'
__A : str = processor(text=_A , return_tensors='np' )
__A : List[str] = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : int = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : List[str] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = 'lower newer'
__A : Optional[Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Any = 'google/owlvit-base-patch32'
__A : int = OwlViTProcessor.from_pretrained(_A )
__A : Dict = ['cat', 'nasa badge']
__A : Optional[Any] = processor(text=_A )
__A : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : Dict = [['cat', 'nasa badge'], ['person']]
__A : Dict = processor(text=_A )
__A : Optional[int] = 16
__A : Any = len(_A )
__A : Union[str, Any] = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Union[str, Any] = ['cat', 'nasa badge']
__A : Tuple = processor(text=_A )
__A : str = 16
__A : int = inputs['input_ids']
__A : List[Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : str = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Tuple = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
| 280
| 0
|
from __future__ import annotations
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = str(__lowerCamelCase )
return len(__lowerCamelCase ) == 9 and set(__lowerCamelCase ) == set('123456789' )
def lowercase__ ( ):
'''simple docstring'''
for base_num in range(9_999 , 4_999 , -1 ):
UpperCAmelCase_ : Tuple = 100_002 * base_num
if is_9_pandigital(__lowerCamelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
UpperCAmelCase_ : int = 1_002_003 * base_num
if is_9_pandigital(__lowerCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(F'{solution() = }')
| 351
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCAmelCase_ : str = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2 )
UpperCAmelCase_ : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
for example in examples:
UpperCAmelCase_ : str = video_classifier(_UpperCamelCase )
self.assertEqual(
_UpperCamelCase , [
{'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )},
{'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )},
] , )
@require_torch
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCAmelCase_ : Optional[Any] = VideoMAEFeatureExtractor(
size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} )
UpperCAmelCase_ : str = pipeline(
'video-classification' , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4 )
UpperCAmelCase_ : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCAmelCase_ : List[str] = video_classifier(_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
UpperCAmelCase_ : Tuple = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def __UpperCAmelCase ( self ) -> Dict:
pass
| 145
| 0
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (DDPMParallelScheduler,)
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Dict ) -> List[str]:
a = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__lowerCamelCase )
return config
def __UpperCAmelCase ( self : Tuple ) -> Any:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> int:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
self.check_over_configs(thresholding=__lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCamelCase , prediction_type=__lowerCamelCase , sample_max_value=__lowerCamelCase , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = len(__lowerCamelCase )
a = self.dummy_model()
a = self.dummy_sample_deter
a = self.dummy_sample_deter + 0.1
a = self.dummy_sample_deter - 0.1
a = samplea.shape[0]
a = torch.stack([samplea, samplea, samplea] , dim=0 )
a = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase )
a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
a = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
a = torch.sum(torch.abs(__lowerCamelCase ) )
a = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = len(__lowerCamelCase )
a = self.dummy_model()
a = self.dummy_sample_deter
a = torch.manual_seed(0 )
for t in reversed(range(__lowerCamelCase ) ):
# 1. predict noise residual
a = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
a = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
a = pred_prev_sample
a = torch.sum(torch.abs(__lowerCamelCase ) )
a = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config(prediction_type="v_prediction" )
a = scheduler_class(**__lowerCamelCase )
a = len(__lowerCamelCase )
a = self.dummy_model()
a = self.dummy_sample_deter
a = torch.manual_seed(0 )
for t in reversed(range(__lowerCamelCase ) ):
# 1. predict noise residual
a = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
a = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
a = pred_prev_sample
a = torch.sum(torch.abs(__lowerCamelCase ) )
a = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__lowerCamelCase )
a = scheduler.timesteps
for i, timestep in enumerate(__lowerCamelCase ):
if i == len(__lowerCamelCase ) - 1:
a = -1
else:
a = timesteps[i + 1]
a = scheduler.previous_timestep(__lowerCamelCase )
a = prev_t.item()
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [1_00, 87, 50, 51, 0]
with self.assertRaises(__lowerCamelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [1_00, 87, 50, 1, 0]
a = len(__lowerCamelCase )
with self.assertRaises(__lowerCamelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__lowerCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=__lowerCamelCase )
| 107
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
UpperCAmelCase__ : str = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase__ : int = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
UpperCAmelCase__ : List[Any] = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
UpperCAmelCase__ : str = tempfile.mkdtemp()
UpperCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
# load decoder from hub
UpperCAmelCase__ : Tuple = '''hf-internal-testing/ngram-beam-search-decoder'''
def lowercase_ ( self : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.add_kwargs_tokens_map.copy()
kwargs.update(_A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : Dict , **_A : str ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : Tuple , **_A : int ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Dict = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_feature_extractor()
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : str = floats_list((3, 1_000) )
UpperCAmelCase__ : str = feature_extractor(_A , return_tensors='''np''' )
UpperCAmelCase__ : Optional[Any] = processor(_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase__ : Any = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''
UpperCAmelCase__ : Union[str, Any] = processor(text=_A )
UpperCAmelCase__ : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : str , _A : int=(2, 10, 16) , _A : Optional[int]=77 ):
'''simple docstring'''
np.random.seed(_A )
return np.random.rand(*_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Dict = self.get_tokenizer()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Tuple = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCAmelCase__ : Union[str, Any] = processor.decode(_A )
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowercase_ ( self : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_feature_extractor()
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
UpperCAmelCase__ : List[Any] = processor.batch_decode(_A , _A )
UpperCAmelCase__ : List[str] = list(_A )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch(_A , _A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[Any] = self._get_dummy_logits()
UpperCAmelCase__ : Optional[Any] = 15
UpperCAmelCase__ : Union[str, Any] = -2_0.0
UpperCAmelCase__ : List[str] = -4.0
UpperCAmelCase__ : str = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text
UpperCAmelCase__ : Tuple = list(_A )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : List[str] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase__ : List[Any] = 2.0
UpperCAmelCase__ : Union[str, Any] = 5.0
UpperCAmelCase__ : Any = -2_0.0
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Any = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
UpperCAmelCase__ : List[Any] = decoded_processor_out.text
UpperCAmelCase__ : List[Any] = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Any = decoder.decode_beams_batch(
_A , _A , )
UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
UpperCAmelCase__ : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Dict = os.listdir(_A )
UpperCAmelCase__ : str = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(_A )
UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Optional[Any] = os.listdir(_A )
UpperCAmelCase__ : int = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase__ : Tuple = processor_wavaveca(_A , return_tensors='''np''' )
UpperCAmelCase__ : Union[str, Any] = processor_auto(_A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase__ : str = self._get_dummy_logits()
UpperCAmelCase__ : List[Any] = processor_wavaveca.batch_decode(_A )
UpperCAmelCase__ : List[Any] = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Any = self.get_tokenizer()
UpperCAmelCase__ : List[str] = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def lowercase_ ( _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()[0]
UpperCAmelCase__ : int = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self : str ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Dict = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
UpperCAmelCase__ : Optional[Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : Any = iter(_A )
UpperCAmelCase__ : Dict = next(_A )
UpperCAmelCase__ : Optional[int] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase__ : List[Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : Union[str, Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()
UpperCAmelCase__ : List[str] = processor.decode(logits[0] , output_word_offsets=_A )
UpperCAmelCase__ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : List[str] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase__ : Tuple = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
# output times
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
UpperCAmelCase__ : Dict = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : Any = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
| 181
| 0
|
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( a__ , a__ , unittest.TestCase ):
snake_case__ = AutoencoderKL
snake_case__ = "sample"
snake_case__ = 1E-2
@property
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Tuple = 4
__lowerCamelCase : Any = 3
__lowerCamelCase : Tuple = (32, 32)
__lowerCamelCase : Any = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
return {"sample": image}
@property
def lowerCamelCase__ ( self : Dict ):
return (3, 32, 32)
@property
def lowerCamelCase__ ( self : List[str] ):
return (3, 32, 32)
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : List[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
__lowerCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : int ):
pass
def lowerCamelCase__ ( self : Optional[int] ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def lowerCamelCase__ ( self : Dict ):
# enable deterministic behavior for gradient checkpointing
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase : Optional[int] = self.model_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
assert not model.is_gradient_checkpointing and model.training
__lowerCamelCase : Any = model(**UpperCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__lowerCamelCase : List[Any] = torch.randn_like(UpperCAmelCase )
__lowerCamelCase : Any = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__lowerCamelCase : Tuple = self.model_class(**UpperCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(UpperCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__lowerCamelCase : Optional[int] = model_a(**UpperCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__lowerCamelCase : int = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__lowerCamelCase : List[str] = dict(model.named_parameters() )
__lowerCamelCase : str = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase , __lowerCamelCase : int = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
__lowerCamelCase : Any = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Tuple = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
__lowerCamelCase : List[Any] = model.to(UpperCAmelCase )
model.eval()
if torch_device == "mps":
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
else:
__lowerCamelCase : Union[str, Any] = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
__lowerCamelCase : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__lowerCamelCase : List[Any] = image.to(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(UpperCAmelCase , sample_posterior=UpperCAmelCase , generator=UpperCAmelCase ).sample
__lowerCamelCase : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__lowerCamelCase : List[Any] = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
__lowerCamelCase : str = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__lowerCamelCase : Optional[int] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1E-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ):
return F"""gaussian_noise_s={seed}_shape={"_".join([str(UpperCAmelCase ) for s in shape] )}.npy"""
def lowerCamelCase__ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : str=0 , UpperCAmelCase : List[Any]=(4, 3, 512, 512) , UpperCAmelCase : str=False ):
__lowerCamelCase : Any = torch.floataa if fpaa else torch.floataa
__lowerCamelCase : Optional[int] = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) ).to(UpperCAmelCase ).to(UpperCAmelCase )
return image
def lowerCamelCase__ ( self : int , UpperCAmelCase : int="CompVis/stable-diffusion-v1-4" , UpperCAmelCase : Union[str, Any]=False ):
__lowerCamelCase : Dict = "fp16" if fpaa else None
__lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa
__lowerCamelCase : Dict = AutoencoderKL.from_pretrained(
UpperCAmelCase , subfolder="vae" , torch_dtype=UpperCAmelCase , revision=UpperCAmelCase , )
model.to(UpperCAmelCase ).eval()
return model
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Union[str, Any]=0 ):
if torch_device == "mps":
return torch.manual_seed(UpperCAmelCase )
return torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] ):
__lowerCamelCase : str = self.get_sd_vae_model()
__lowerCamelCase : int = self.get_sd_image(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = self.get_generator(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(UpperCAmelCase , generator=UpperCAmelCase , sample_posterior=UpperCAmelCase ).sample
assert sample.shape == image.shape
__lowerCamelCase : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowerCamelCase : Tuple = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : List[str] = self.get_sd_vae_model(fpaa=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = self.get_sd_image(UpperCAmelCase , fpaa=UpperCAmelCase )
__lowerCamelCase : int = self.get_generator(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Any = model(UpperCAmelCase , generator=UpperCAmelCase , sample_posterior=UpperCAmelCase ).sample
assert sample.shape == image.shape
__lowerCamelCase : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowerCamelCase : int = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ):
__lowerCamelCase : Optional[Any] = self.get_sd_vae_model()
__lowerCamelCase : Tuple = self.get_sd_image(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Dict = model(UpperCAmelCase ).sample
assert sample.shape == image.shape
__lowerCamelCase : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowerCamelCase : Tuple = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
__lowerCamelCase : int = self.get_sd_vae_model()
__lowerCamelCase : Any = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowerCamelCase : Dict = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__lowerCamelCase : Dict = sample[-1, -2:, :2, -2:].flatten().cpu()
__lowerCamelCase : List[str] = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def lowerCamelCase__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : Tuple = self.get_sd_vae_model(fpaa=UpperCAmelCase )
__lowerCamelCase : Any = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : Optional[int] = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__lowerCamelCase : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowerCamelCase : Any = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Any ):
__lowerCamelCase : Union[str, Any] = self.get_sd_vae_model(fpaa=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : str = model.decode(UpperCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : str ):
__lowerCamelCase : int = self.get_sd_vae_model()
__lowerCamelCase : List[str] = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowerCamelCase : Optional[int] = model.decode(UpperCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : List[str] = self.get_sd_vae_model()
__lowerCamelCase : Optional[int] = self.get_sd_image(UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.get_generator(UpperCAmelCase )
with torch.no_grad():
__lowerCamelCase : int = model.encode(UpperCAmelCase ).latent_dist
__lowerCamelCase : Tuple = dist.sample(generator=UpperCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__lowerCamelCase : str = sample[0, -1, -3:, -3:].flatten().cpu()
__lowerCamelCase : Optional[int] = torch.tensor(UpperCAmelCase )
__lowerCamelCase : str = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase )
| 64
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowercase_ ( _lowerCamelCase: str ) -> int:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowerCamelCase : int = model_type_to_module_name(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = importlib.import_module(F""".{module_name}""" , "transformers.models" )
try:
return getattr(_lowerCamelCase , _lowerCamelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_lowerCamelCase , "__name__" , _lowerCamelCase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowerCamelCase : int = importlib.import_module("transformers" )
if hasattr(_lowerCamelCase , _lowerCamelCase ):
return getattr(_lowerCamelCase , _lowerCamelCase )
return None
def lowercase_ ( _lowerCamelCase: Union[str, os.PathLike] , _lowerCamelCase: Optional[Union[str, os.PathLike]] = None , _lowerCamelCase: bool = False , _lowerCamelCase: bool = False , _lowerCamelCase: Optional[Dict[str, str]] = None , _lowerCamelCase: Optional[Union[bool, str]] = None , _lowerCamelCase: Optional[str] = None , _lowerCamelCase: bool = False , **_lowerCamelCase: Tuple , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : List[str] = get_file_from_repo(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(_lowerCamelCase , encoding="utf-8" ) as reader:
return json.load(_lowerCamelCase )
class _snake_case :
def __init__( self : Tuple ):
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase )
def lowerCamelCase__ ( cls : Dict , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ):
__lowerCamelCase : int = kwargs.pop("config" , UpperCAmelCase )
__lowerCamelCase : Dict = kwargs.pop("trust_remote_code" , UpperCAmelCase )
__lowerCamelCase : Any = True
__lowerCamelCase , __lowerCamelCase : str = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Optional[int] = config_dict.get("image_processor_type" , UpperCAmelCase )
__lowerCamelCase : List[Any] = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
__lowerCamelCase : List[str] = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__lowerCamelCase : Dict = config_dict.pop("feature_extractor_type" , UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
__lowerCamelCase : Tuple = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
__lowerCamelCase : Any = config_dict["auto_map"]["AutoFeatureExtractor"]
__lowerCamelCase : Optional[int] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : int = AutoConfig.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# It could be in `config.image_processor_type``
__lowerCamelCase : int = getattr(UpperCAmelCase , "image_processor_type" , UpperCAmelCase )
if hasattr(UpperCAmelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
__lowerCamelCase : Optional[int] = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
__lowerCamelCase : Any = image_processor_class_from_name(UpperCAmelCase )
__lowerCamelCase : str = image_processor_auto_map is not None
__lowerCamelCase : Optional[Any] = image_processor_class is not None or type(UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
__lowerCamelCase : Dict = resolve_trust_remote_code(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if has_remote_code and trust_remote_code:
__lowerCamelCase : Optional[Any] = get_class_from_dynamic_module(
UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : List[Any] = kwargs.pop("code_revision" , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
__lowerCamelCase : Tuple = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase )]
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ):
IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase , UpperCAmelCase )
| 64
| 1
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path / "cache"
lowerCAmelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : List[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tmp_path / "cache"
lowerCAmelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Optional[int] = features.copy() if features else default_expected_features
lowerCAmelCase : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : int = tmp_path / "cache"
lowerCAmelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : List[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = [parquet_path]
lowerCAmelCase : List[Any] = tmp_path / "cache"
lowerCAmelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Optional[int] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=("train",) ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
lowerCAmelCase : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path / "cache"
lowerCAmelCase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : List[Any] = tmp_path / "cache"
lowerCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Optional[int] = features.copy() if features else default_expected_features
lowerCAmelCase : int = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : Tuple = ParquetDatasetReader({"train": parquet_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if split:
lowerCAmelCase : Any = {split: parquet_path}
else:
lowerCAmelCase : Optional[int] = "train"
lowerCAmelCase : Union[str, Any] = {"train": parquet_path, "test": parquet_path}
lowerCAmelCase : Tuple = tmp_path / "cache"
lowerCAmelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Union[str, Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCAmelCase : List[Any] = pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCAmelCase : Optional[int] = pf.read()
assert dataset.data.table == output_table
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = str(shared_datadir / "test_image_rgb.jpg" )
lowerCAmelCase : Optional[Any] = {"image": [image_path]}
lowerCAmelCase : List[str] = Features({"image": Image()} )
lowerCAmelCase : List[Any] = Dataset.from_dict(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCAmelCase : Optional[int] = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase : Tuple = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
assert get_writer_batch_size(SCREAMING_SNAKE_CASE ) == expected
| 108
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : int =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCAmelCase : Dict = VideoClassificationPipeline(model=snake_case__ , image_processor=snake_case__ , top_k=2 )
lowerCAmelCase : Any = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
for example in examples:
lowerCAmelCase : str = video_classifier(snake_case__ )
self.assertEqual(
snake_case__ , [
{"score": ANY(snake_case__ ), "label": ANY(snake_case__ )},
{"score": ANY(snake_case__ ), "label": ANY(snake_case__ )},
] , )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowerCAmelCase : str = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
lowerCAmelCase : int = pipeline(
"video-classification" , model=snake_case__ , feature_extractor=snake_case__ , frame_sampling_rate=4 )
lowerCAmelCase : Optional[int] = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCAmelCase : Union[str, Any] = video_classifier(snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
lowerCAmelCase : Tuple = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] , )
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
pass
| 108
| 1
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
a__ = datasets.load_iris()
a__ = np.array(data['''data'''])
a__ = np.array(data['''target'''])
a__ = data['''target_names''']
a__ , a__ , a__ , a__ = train_test_split(X, y)
def __UpperCAmelCase ( __a : Optional[Any] ,__a : Dict ) -> str:
"""simple docstring"""
return np.linalg.norm(np.array(__a ) - np.array(__a ) )
def __UpperCAmelCase ( __a : str ,__a : Dict ,__a : List[str] ,__a : Dict ,__a : Union[str, Any]=5 ) -> Tuple:
"""simple docstring"""
_a : Tuple = zip(__a ,__a )
# List of distances of all points from the point to be classified
_a : Optional[Any] = []
for data_point in data:
_a : int = euclidean_distance(data_point[0] ,__a )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
_a : List[str] = [i[1] for i in sorted(__a )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
_a : List[str] = Counter(__a ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 15
|
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15
| 1
|
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCamelCase_ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[int] , _a : str ) -> Optional[int]:
super().__init__()
__lowerCamelCase : str = model
__lowerCamelCase : Dict = 2
__lowerCamelCase : Union[str, Any] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _lowercase ( self : List[str] ) -> Optional[Any]:
pass
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> List[str]:
# load longformer model from model identifier
__lowerCamelCase : List[str] = LongformerModel.from_pretrained(_lowerCAmelCase )
__lowerCamelCase : Tuple = LightningModel(_lowerCAmelCase )
__lowerCamelCase : List[str] = torch.load(_lowerCAmelCase ,map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__lowerCamelCase : Dict = LongformerForQuestionAnswering.from_pretrained(_lowerCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_lowerCAmelCase )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 208
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_UpperCamelCase = logging.get_logger(__name__)
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=False ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__lowerCamelCase : str = os.path.abspath(_lowerCAmelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
__lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase ,map_location='cpu' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__lowerCamelCase : Tuple = convert_pytorch_state_dict_to_flax(_lowerCAmelCase ,_lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__lowerCamelCase : Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(_lowerCAmelCase ,_lowerCAmelCase )
return flax_state_dict
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(_lowerCAmelCase ) -> bool:
return len(set(_lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__lowerCamelCase : List[str] = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__lowerCamelCase : int = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__lowerCamelCase : List[str] = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__lowerCamelCase : Tuple = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__lowerCamelCase : List[str] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
__lowerCamelCase : List[Any] = pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__lowerCamelCase : Optional[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ):
__lowerCamelCase : Dict = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__lowerCamelCase : Tuple = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__lowerCamelCase : int = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__lowerCamelCase : Optional[int] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__lowerCamelCase : Union[str, Any] = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__lowerCamelCase : Tuple = pt_tuple_key[-2] + '_v'
if name is not None:
__lowerCamelCase : Any = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Optional[int]:
# convert pytorch tensor to numpy
__lowerCamelCase : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
__lowerCamelCase : str = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__lowerCamelCase : List[Any] = flax_model.params['params']
else:
__lowerCamelCase : List[str] = flax_model.params
__lowerCamelCase : Tuple = flatten_dict(_lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__lowerCamelCase : Optional[int] = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(_lowerCAmelCase )
__lowerCamelCase : str = {}
__lowerCamelCase : Union[str, Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__lowerCamelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCamelCase : Any = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__lowerCamelCase : Any = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCamelCase : Optional[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
__lowerCamelCase ,__lowerCamelCase : Dict = rename_key_and_reshape_tensor(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# add model prefix if necessary
__lowerCamelCase : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCamelCase : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__lowerCamelCase : List[Any] = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase ,_lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__lowerCamelCase : List[Any] = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__lowerCamelCase : str = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Any:
import torch
# Load the index
__lowerCamelCase : Optional[int] = {}
for shard_file in shard_filenames:
# load using msgpack utils
__lowerCamelCase : Dict = torch.load(_lowerCAmelCase )
__lowerCamelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
__lowerCamelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__lowerCamelCase : str = flax_model.params['params']
__lowerCamelCase : Tuple = flatten_dict(_lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__lowerCamelCase : Dict = flax_model.params
__lowerCamelCase : Optional[int] = flatten_dict(_lowerCAmelCase )
__lowerCamelCase : List[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__lowerCamelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCamelCase : Optional[Any] = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__lowerCamelCase : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCamelCase : Optional[int] = pt_tuple_key[1:]
# Correctly rename weight parameters
__lowerCamelCase ,__lowerCamelCase : Any = rename_key_and_reshape_tensor(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# add model prefix if necessary
__lowerCamelCase : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCamelCase : Optional[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__lowerCamelCase : int = jnp.asarray(_lowerCAmelCase )
continue
if "var" in flax_key[-1]:
__lowerCamelCase : Tuple = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase ,_lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__lowerCamelCase : Optional[int] = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__lowerCamelCase : Tuple = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = os.path.abspath(_lowerCAmelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__lowerCamelCase : str = getattr(_lowerCAmelCase ,'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(_lowerCAmelCase ,'rb' ) as state_f:
try:
__lowerCamelCase : Tuple = from_bytes(_lowerCAmelCase ,state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_lowerCAmelCase ,_lowerCAmelCase )
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Optional[Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
__lowerCamelCase : Any = flatten_dict(jax.tree_util.tree_map(lambda _lowerCAmelCase : x.dtype == jnp.bfloataa ,_lowerCAmelCase ) ).values()
if any(_lowerCAmelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
__lowerCamelCase : Dict = jax.tree_util.tree_map(
lambda _lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,_lowerCAmelCase )
__lowerCamelCase : Any = flatten_dict(_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = pt_model.state_dict()
__lowerCamelCase : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__lowerCamelCase : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__lowerCamelCase : Any = []
__lowerCamelCase : Union[str, Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCamelCase : List[str] = flax_key_tuple[0] == pt_model.base_model_prefix
__lowerCamelCase : Dict = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCamelCase : List[Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCamelCase : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# conv layer
__lowerCamelCase : Tuple = flax_key_tuple[:-1] + ('weight',)
__lowerCamelCase : Tuple = jnp.transpose(_lowerCAmelCase ,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# linear layer
__lowerCamelCase : Dict = flax_key_tuple[:-1] + ('weight',)
__lowerCamelCase : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowerCamelCase : Optional[Any] = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__lowerCamelCase : str = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__lowerCamelCase : int = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__lowerCamelCase : Tuple = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__lowerCamelCase : Optional[int] = '.'.join(_lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__lowerCamelCase : Tuple = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__lowerCamelCase : str = key.split('.' )
__lowerCamelCase : Tuple = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__lowerCamelCase : Any = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__lowerCamelCase : Tuple = key_components[-2] + '_v'
if name is not None:
__lowerCamelCase : Optional[int] = key_components[:-3] + [name]
__lowerCamelCase : Union[str, Any] = '.'.join(_lowerCAmelCase )
__lowerCamelCase : Optional[int] = key
if flax_key in special_pt_names:
__lowerCamelCase : int = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__lowerCamelCase : Tuple = np.asarray(_lowerCAmelCase ) if not isinstance(_lowerCAmelCase ,np.ndarray ) else flax_tensor
__lowerCamelCase : List[str] = torch.from_numpy(_lowerCAmelCase )
# remove from missing keys
missing_keys.remove(_lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCAmelCase )
pt_model.load_state_dict(_lowerCAmelCase )
# re-transform missing_keys to list
__lowerCamelCase : int = list(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_lowerCAmelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 208
| 1
|
import math
from datetime import datetime, timedelta
def lowercase( UpperCamelCase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = year % 19
UpperCamelCase = year % 4
UpperCamelCase = year % 7
UpperCamelCase = math.floor(year / 100 )
UpperCamelCase = math.floor((13 + 8 * leap_day_inhibits) / 25 )
UpperCamelCase = leap_day_inhibits / 4
UpperCamelCase = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCamelCase = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCamelCase = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCamelCase = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__UpperCamelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__UpperCamelCase , 4 , 18 )
else:
return datetime(__UpperCamelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
_SCREAMING_SNAKE_CASE = """will be""" if year > datetime.now().year else """was"""
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 369
|
def lowercase( UpperCamelCase_ ) -> int:
'''simple docstring'''
UpperCamelCase = len(UpperCamelCase_ )
UpperCamelCase = len(matrix[0] )
UpperCamelCase = min(UpperCamelCase_ , UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
UpperCamelCase = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
UpperCamelCase , UpperCamelCase = matrix[i], matrix[row]
UpperCamelCase = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
UpperCamelCase = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165
| 0
|
import os
import sys
import unittest
lowercase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase : Optional[int] = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
lowercase : List[str] = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = get_test_to_tester_mapping(snake_case )
lowercase : Dict = get_test_to_tester_mapping(snake_case )
lowercase : str = {"""BertModelTest""": """BertModelTester"""}
lowercase : Dict = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = get_model_to_test_mapping(snake_case )
lowercase : str = get_model_to_test_mapping(snake_case )
lowercase : List[Any] = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
lowercase : List[str] = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = get_model_to_tester_mapping(snake_case )
lowercase : Any = get_model_to_tester_mapping(snake_case )
lowercase : List[str] = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
lowercase : List[Any] = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) ,snake_case )
| 20
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = '''instructblip_vision_model'''
def __init__( self : str , lowerCAmelCase__ : Dict=1_4_0_8 , lowerCAmelCase__ : int=6_1_4_4 , lowerCAmelCase__ : List[str]=3_9 , lowerCAmelCase__ : int=1_6 , lowerCAmelCase__ : Tuple=2_2_4 , lowerCAmelCase__ : Tuple=1_4 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Union[str, Any]=1e-6 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[int]=1e-10 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : List[Any] = image_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Tuple = qkv_bias
@classmethod
def _lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''instructblip_qformer'''
def __init__( self : List[str] , lowerCAmelCase__ : Union[str, Any]=3_0_5_2_2 , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Optional[Any]=1_2 , lowerCAmelCase__ : Union[str, Any]=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Optional[int]=1e-12 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Union[str, Any]="absolute" , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : int=1_4_0_8 , **lowerCAmelCase__ : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[str] = layer_norm_eps
_UpperCAmelCase : Tuple = position_embedding_type
_UpperCAmelCase : Tuple = cross_attention_frequency
_UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : Tuple = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = '''instructblip'''
UpperCamelCase_ : Dict = True
def __init__( self : Tuple , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=3_2 , **lowerCAmelCase__ : Dict ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if vision_config is None:
_UpperCAmelCase : List[str] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_UpperCAmelCase : Tuple = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_UpperCAmelCase : int = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_UpperCAmelCase : List[str] = InstructBlipVisionConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = InstructBlipQFormerConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = text_config["model_type"] if "model_type" in text_config else "opt"
_UpperCAmelCase : Optional[int] = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ )
_UpperCAmelCase : Dict = self.text_config.tie_word_embeddings
_UpperCAmelCase : List[Any] = self.text_config.is_encoder_decoder
_UpperCAmelCase : List[str] = num_query_tokens
_UpperCAmelCase : int = self.vision_config.hidden_size
_UpperCAmelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCAmelCase : int = 1.0
_UpperCAmelCase : Dict = 0.02
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : InstructBlipVisionConfig , lowerCAmelCase__ : InstructBlipQFormerConfig , lowerCAmelCase__ : PretrainedConfig , **lowerCAmelCase__ : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
_UpperCAmelCase : List[Any] = self.qformer_config.to_dict()
_UpperCAmelCase : List[Any] = self.text_config.to_dict()
_UpperCAmelCase : Dict = self.__class__.model_type
return output
| 145
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case : List[str] = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_snake_case : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_snake_case : Union[str, Any] = ["small", "medium", "large"]
_snake_case : List[Any] = "lm_head.decoder.weight"
_snake_case : Optional[Any] = "lm_head.weight"
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Tuple = torch.load(__lowerCamelCase )
__snake_case : Dict = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
_snake_case : Any = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_snake_case : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
_snake_case : List[str] = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 134
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
A_ = TypeVar('''T''')
A_ = Union[List[T], Tuple[T, ...]]
A_ = Union[T, List[T], Dict[str, T]]
A_ = Union[str, bytes, os.PathLike]
| 64
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''yjernite/retribert-base-uncased''': 5_12,
}
A_ = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = RetriBertTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self: int, a_: int=None, a_: Dict=None, a_: Any=True, a_: int="[UNK]", a_: Any="[SEP]", a_: List[Any]="[PAD]", a_: List[Any]="[CLS]", a_: str="[MASK]", a_: Dict=True, a_: Optional[int]=None, **a_: Tuple, ):
'''simple docstring'''
super().__init__(
a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, )
_snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", a_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", a_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars
):
_snake_case : Dict = getattr(a_, normalizer_state.pop("""type""" ) )
_snake_case : List[Any] = do_lower_case
_snake_case : List[str] = strip_accents
_snake_case : Tuple = tokenize_chinese_chars
_snake_case : Tuple = normalizer_class(**a_ )
_snake_case : List[str] = do_lower_case
def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int]=None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self: List[str], a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
| 64
| 1
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar("T")
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return (position - 1) // 2
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return (2 * position) + 1
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return (2 * position) + 2
class UpperCamelCase__( Generic[T] ):
def __init__( self ) -> None:
A__ = []
A__ = {}
A__ = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def snake_case__ ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
A__ = self.elements
self.elements += 1
self._bubble_up(__UpperCAmelCase )
def snake_case__ ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 ,self.elements - 1 )
A__ , A__ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
A__ , A__ = self.heap[0]
self._bubble_down(__UpperCAmelCase )
return elem
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
# Update the weight of the given key
A__ = self.position_map[elem]
A__ = (elem, weight)
if position > 0:
A__ = get_parent_position(__UpperCAmelCase )
A__ , A__ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__UpperCAmelCase )
else:
self._bubble_down(__UpperCAmelCase )
else:
self._bubble_down(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
A__ = self.position_map[elem]
if curr_pos == 0:
return None
A__ = get_parent_position(__UpperCAmelCase )
A__ , A__ = self.heap[curr_pos]
A__ , A__ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__UpperCAmelCase ,__UpperCAmelCase )
return self._bubble_up(__UpperCAmelCase )
return None
def snake_case__ ( self ,__UpperCAmelCase ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
A__ = self.position_map[elem]
A__ , A__ = self.heap[curr_pos]
A__ = get_child_left_position(__UpperCAmelCase )
A__ = get_child_right_position(__UpperCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
A__ , A__ = self.heap[child_left_position]
A__ , A__ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__UpperCAmelCase ,__UpperCAmelCase )
return self._bubble_down(__UpperCAmelCase )
if child_left_position < self.elements:
A__ , A__ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__UpperCAmelCase ,__UpperCAmelCase )
return self._bubble_down(__UpperCAmelCase )
else:
return None
if child_right_position < self.elements:
A__ , A__ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__UpperCAmelCase ,__UpperCAmelCase )
return self._bubble_down(__UpperCAmelCase )
return None
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
# Swap the nodes at the given positions
A__ = self.heap[nodea_pos][0]
A__ = self.heap[nodea_pos][0]
A__ , A__ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
A__ = nodea_pos
A__ = nodea_pos
class UpperCamelCase__( Generic[T] ):
def __init__( self ) -> None:
A__ = {}
A__ = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def snake_case__ ( self ,__UpperCAmelCase ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
A__ = {}
self.nodes += 1
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__UpperCAmelCase )
self.add_node(__UpperCAmelCase )
A__ = weight
A__ = weight
def UpperCAmelCase ( UpperCamelCase__ , ):
"""simple docstring"""
A__ = {node: maxsize for node in graph.connections}
A__ = {node: None for node in graph.connections}
A__ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(UpperCamelCase__ , UpperCamelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
A__ = priority_queue.extract_min()
A__ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
A__ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCamelCase__ , dist[neighbour] )
A__ = node
# running prim's algorithm
while not priority_queue.is_empty():
A__ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
A__ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCamelCase__ , dist[neighbour] )
A__ = node
return dist, parent
| 154
|
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> Optional[int]:
A__ = inspect.getfile(accelerate.test_utils )
A__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
A__ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def snake_case__ ( self ) -> int:
A__ = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
A__ = [sys.executable] + distributed_args
execute_subprocess_async(__UpperCAmelCase ,env=os.environ.copy() )
| 154
| 1
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
SCREAMING_SNAKE_CASE :str = datasets.load_iris()
SCREAMING_SNAKE_CASE :Dict = np.array(data['data'])
SCREAMING_SNAKE_CASE :Union[str, Any] = np.array(data['target'])
SCREAMING_SNAKE_CASE :Dict = data['target_names']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[str] = train_test_split(X, y)
def UpperCAmelCase ( a_ , a_ ) -> Union[str, Any]:
"""simple docstring"""
return np.linalg.norm(np.array(a_ ) - np.array(a_ ) )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_=5 ) -> List[Any]:
"""simple docstring"""
__A = zip(a_ , a_ )
# List of distances of all points from the point to be classified
__A = []
for data_point in data:
__A = euclidean_distance(data_point[0] , a_ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__A = [i[1] for i in sorted(a_ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__A = Counter(a_ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 15
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin'
SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json'
SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors'
SCREAMING_SNAKE_CASE :str = 'tf_model.h5'
SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json'
SCREAMING_SNAKE_CASE :str = 'model.ckpt'
SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack'
SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json'
SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors'
SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json'
SCREAMING_SNAKE_CASE :str = 'config.json'
SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json'
SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME
SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json'
SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json'
SCREAMING_SNAKE_CASE :Optional[int] = '▁'
SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
SCREAMING_SNAKE_CASE :str = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
if version.parse(a_ ) < version.parse(a_ ):
if "dev" in min_version:
__A = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
__A = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 15
| 1
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A: List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : str = XLMRobertaTokenizer
__lowerCAmelCase : List[Any] = XLMRobertaTokenizerFast
__lowerCAmelCase : Union[str, Any] = True
__lowerCAmelCase : Tuple = True
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[Any] = XLMRobertaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : List[str] = """<pad>"""
UpperCAmelCase : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1002 )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] = XLMRobertaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
UpperCAmelCase : Dict = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase : List[str] = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Dict = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE , legacy_format=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Dict = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_SCREAMING_SNAKE_CASE , f.name )
UpperCAmelCase : List[Any] = XLMRobertaTokenizer(f.name , keep_accents=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = pickle.dumps(_SCREAMING_SNAKE_CASE )
pickle.loads(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase : List[str] = self.get_tokenizer()
UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase : Dict = """I was born in 92000, and this is falsé."""
UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase : Dict = tokenizer.encode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = """Hello World!"""
UpperCAmelCase : Any = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Dict = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
UpperCAmelCase : List[Any] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Tuple = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 76
|
"""simple docstring"""
import os
from collections.abc import Iterator
def _snake_case ( UpperCamelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(UpperCamelCase ):
UpperCAmelCase : List[Any] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(UpperCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(UpperCamelCase , UpperCamelCase ).lstrip("""./""" )
def _snake_case ( UpperCamelCase : Union[str, Any] ):
return F"{i * ' '}*" if i else "\n##"
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : List[str] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(UpperCamelCase ) or old_parts[i] != new_part) and new_part:
print(F"{md_prefix(UpperCamelCase )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def _snake_case ( UpperCamelCase : str = "." ):
UpperCAmelCase : Union[str, Any] = """"""
for filepath in sorted(good_file_paths(UpperCamelCase ) ):
UpperCAmelCase , UpperCAmelCase : Any = os.path.split(UpperCamelCase )
if filepath != old_path:
UpperCAmelCase : Optional[int] = print_path(UpperCamelCase , UpperCamelCase )
UpperCAmelCase : str = (filepath.count(os.sep ) + 1) if filepath else 0
UpperCAmelCase : Optional[int] = F"{filepath}/{filename}".replace(""" """ , """%20""" )
UpperCAmelCase : Optional[int] = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F"{md_prefix(UpperCamelCase )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(".")
| 76
| 1
|
from PIL import Image
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
def contrast(snake_case ) -> int:
return int(1_28 + factor * (c - 1_28) )
return img.point(snake_case )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
A__ = change_contrast(img, 1_70)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 82
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : List[str] = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["YolosFeatureExtractor"]
A_ : Optional[int] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165
| 0
|
'''simple docstring'''
def __magic_name__ ( A ) -> Optional[Any]:
snake_case = len(A )
while cur > 1:
# Find the maximum number in arr
snake_case = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
snake_case = arr[mi::-1] + arr[mi + 1 : len(A )]
# Reverse whole list
snake_case = arr[cur - 1 :: -1] + arr[cur : len(A )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase_ = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 332
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __A ( lowercase_ ):
"""simple docstring"""
__lowerCAmelCase = 42
class __A ( lowercase_, lowercase_ ):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 6_5536 , __A = None , __A = 2 , __A = 2 , __A = 0 , __A = "fourier" , __A = True , __A = False , __A = 0.0 , __A = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __A = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __A = "UNetMidBlock1D" , __A = None , __A = (32, 32, 64) , __A = None , __A = 8 , __A = 1 , __A = False , ) -> List[Any]:
super().__init__()
a =sample_size
# time
if time_embedding_type == "fourier":
a =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase_ , log=lowerCAmelCase_ , flip_sin_to_cos=lowerCAmelCase_ )
a =2 * block_out_channels[0]
elif time_embedding_type == "positional":
a =Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase_ , downscale_freq_shift=lowerCAmelCase_ )
a =block_out_channels[0]
if use_timestep_embedding:
a =block_out_channels[0] * 4
a =TimestepEmbedding(
in_channels=lowerCAmelCase_ , time_embed_dim=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , out_dim=block_out_channels[0] , )
a =nn.ModuleList([] )
a =None
a =nn.ModuleList([] )
a =None
# down
a =in_channels
for i, down_block_type in enumerate(lowerCAmelCase_ ):
a =output_channel
a =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
a =i == len(lowerCAmelCase_ ) - 1
a =get_down_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
a =get_mid_block(
lowerCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase_ , add_downsample=lowerCAmelCase_ , )
# up
a =list(reversed(lowerCAmelCase_ ) )
a =reversed_block_out_channels[0]
if out_block_type is None:
a =out_channels
else:
a =block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
a =output_channel
a =(
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase_ ) - 1 else final_upsample_channels
)
a =i == len(lowerCAmelCase_ ) - 1
a =get_up_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase_ )
a =output_channel
# out
a =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
a =get_out_block(
out_block_type=lowerCAmelCase_ , num_groups_out=lowerCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = True , ) -> Union[UNetaDOutput, Tuple]:
a =timestep
if not torch.is_tensor(lowerCAmelCase_ ):
a =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase_ ) and len(timesteps.shape ) == 0:
a =timesteps[None].to(sample.device )
a =self.time_proj(lowerCAmelCase_ )
if self.config.use_timestep_embedding:
a =self.time_mlp(lowerCAmelCase_ )
else:
a =timestep_embed[..., None]
a =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
a =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
a =()
for downsample_block in self.down_blocks:
a =downsample_block(hidden_states=lowerCAmelCase_ , temb=lowerCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
a =self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
a =down_block_res_samples[-1:]
a =down_block_res_samples[:-1]
a =upsample_block(lowerCAmelCase_ , res_hidden_states_tuple=lowerCAmelCase_ , temb=lowerCAmelCase_ )
# 5. post-process
if self.out_block:
a =self.out_block(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase_ )
| 81
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[Any] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 134
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
SCREAMING_SNAKE_CASE__ : Dict = {
"num_train_timesteps": 40,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
SCREAMING_SNAKE_CASE__ : str = {
"num_train_timesteps": 201,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"num_train_timesteps": 151,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Tuple:
if isinstance(lowercase__ , lowercase__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('''boolean value expected''' )
def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=False ) -> str:
__lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
__lowerCamelCase = checkpoint[f'''{old_prefix}.skip_connection.weight''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict=None ) -> List[Any]:
__lowerCamelCase = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
__lowerCamelCase = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
__lowerCamelCase = checkpoint[f'''{old_prefix}.norm.weight''']
__lowerCamelCase = checkpoint[f'''{old_prefix}.norm.bias''']
__lowerCamelCase = weight_q.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_q.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = weight_k.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_k.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = weight_v.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_v.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
__lowerCamelCase = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ) -> int:
__lowerCamelCase = torch.load(lowercase__ , map_location='''cpu''' )
__lowerCamelCase = {}
__lowerCamelCase = checkpoint["""time_embed.0.weight"""]
__lowerCamelCase = checkpoint["""time_embed.0.bias"""]
__lowerCamelCase = checkpoint["""time_embed.2.weight"""]
__lowerCamelCase = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
__lowerCamelCase = checkpoint["""label_emb.weight"""]
__lowerCamelCase = checkpoint["""input_blocks.0.0.weight"""]
__lowerCamelCase = checkpoint["""input_blocks.0.0.bias"""]
__lowerCamelCase = unet_config["""down_block_types"""]
__lowerCamelCase = unet_config["""layers_per_block"""]
__lowerCamelCase = unet_config["""attention_head_dim"""]
__lowerCamelCase = unet_config["""block_out_channels"""]
__lowerCamelCase = 1
__lowerCamelCase = channels_list[0]
for i, layer_type in enumerate(lowercase__ ):
__lowerCamelCase = channels_list[i]
__lowerCamelCase = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowercase__ ):
__lowerCamelCase = f'''down_blocks.{i}.resnets.{j}'''
__lowerCamelCase = f'''input_blocks.{current_layer}.0'''
__lowerCamelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowercase__ ):
__lowerCamelCase = f'''down_blocks.{i}.resnets.{j}'''
__lowerCamelCase = f'''input_blocks.{current_layer}.0'''
__lowerCamelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
__lowerCamelCase = f'''down_blocks.{i}.attentions.{j}'''
__lowerCamelCase = f'''input_blocks.{current_layer}.1'''
__lowerCamelCase = convert_attention(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
__lowerCamelCase = f'''down_blocks.{i}.downsamplers.0'''
__lowerCamelCase = f'''input_blocks.{current_layer}.0'''
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
__lowerCamelCase = current_channels
# hardcoded the mid-block for now
__lowerCamelCase = """mid_block.resnets.0"""
__lowerCamelCase = """middle_block.0"""
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowerCamelCase = """mid_block.attentions.0"""
__lowerCamelCase = """middle_block.1"""
__lowerCamelCase = convert_attention(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowerCamelCase = """mid_block.resnets.1"""
__lowerCamelCase = """middle_block.2"""
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowerCamelCase = 0
__lowerCamelCase = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowercase__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCamelCase = f'''up_blocks.{i}.resnets.{j}'''
__lowerCamelCase = f'''output_blocks.{current_layer}.0'''
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
__lowerCamelCase = f'''up_blocks.{i}.upsamplers.0'''
__lowerCamelCase = f'''output_blocks.{current_layer-1}.1'''
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCamelCase = f'''up_blocks.{i}.resnets.{j}'''
__lowerCamelCase = f'''output_blocks.{current_layer}.0'''
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
__lowerCamelCase = f'''up_blocks.{i}.attentions.{j}'''
__lowerCamelCase = f'''output_blocks.{current_layer}.1'''
__lowerCamelCase = convert_attention(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
__lowerCamelCase = f'''up_blocks.{i}.upsamplers.0'''
__lowerCamelCase = f'''output_blocks.{current_layer-1}.2'''
__lowerCamelCase = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowerCamelCase = checkpoint["""out.0.weight"""]
__lowerCamelCase = checkpoint["""out.0.bias"""]
__lowerCamelCase = checkpoint["""out.2.weight"""]
__lowerCamelCase = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = strabool(args.class_cond)
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE__ : Any = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
SCREAMING_SNAKE_CASE__ : str = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : str = con_pt_to_diffuser(args.unet_path, unet_config)
SCREAMING_SNAKE_CASE__ : int = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
SCREAMING_SNAKE_CASE__ : Dict = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE__ : Tuple = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE__ : List[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
SCREAMING_SNAKE_CASE__ : Tuple = CMStochasticIterativeScheduler(**scheduler_config)
SCREAMING_SNAKE_CASE__ : List[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 351
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : Optional[int] ) -> Union[str, Any]:
__lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
__lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
__lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
__lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
__lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits
__lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean()
__lowerCamelCase = -(labels.shape[-1] * loss.item())
__lowerCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 339
| 0
|
# Function to print upper half of diamond (pyramid)
def __UpperCamelCase ( _A : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
for i in range(0 , _A ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(""" """ , end="""""" )
for _ in range(0 , i + 1 ): # printing stars
print("""* """ , end="""""" )
print()
def __UpperCamelCase ( _A : str ) ->List[Any]:
"""simple docstring"""
for i in range(_A , 0 , -1 ):
for _ in range(_A , 0 , -1 ): # printing stars
print("""* """ , end="""""" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(""" """ , end="""""" )
def __UpperCamelCase ( _A : Tuple ) ->Optional[int]:
"""simple docstring"""
if n <= 0:
print(""" ... .... nothing printing :(""" )
return
floyd(_A ) # upper half
reverse_floyd(_A ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
__A : Tuple = 1
while K:
__A : str = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
__A : Any = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 154
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : int = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 154
| 1
|
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
UpperCamelCase = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
UpperCamelCase = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
UpperCamelCase = BeautifulSoup(res.text, 'html.parser')
UpperCamelCase = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 369
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Any ) -> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
lowerCAmelCase__ = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> Any:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = "sgugger/tiny-distilbert-classification"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , only_pretrain_model=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Optional[Any] ) -> int:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , torchscript=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a ( self : Dict ) -> Optional[Any]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ) -> Tuple:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
# set architectures equal to `None`
lowerCAmelCase__ = None
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Any ) -> Optional[Any]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def a ( self : int ) -> Dict:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=SCREAMING_SNAKE_CASE__ , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase__ = "sshleifer/tinier_bart"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : List[str] ) -> Dict:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[int] ) -> Optional[int]:
lowerCAmelCase__ = "sshleifer/tinier_bart"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , save_to_csv=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_time.csv" ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "env.csv" ) , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
benchmark.run()
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "env.csv" ) ).exists() )
def a ( self : Optional[Any] ) -> Any:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE__ : List[Any] ):
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "sequential" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "cumulative" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "current" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(SCREAMING_SNAKE_CASE__ , "log.txt" ) , log_print=SCREAMING_SNAKE_CASE__ , trace_memory_line_by_line=SCREAMING_SNAKE_CASE__ , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "log.txt" ) ).exists() )
| 221
| 0
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
a_ = logging.get_logger(__name__)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : int , *a : List[Any] , **a : Any ) -> None:
"""simple docstring"""
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , a , )
super().__init__(*a , **a )
| 76
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def lowerCamelCase__ ( _a):
if isinstance(_a , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
return videos
elif isinstance(_a , (list, tuple)) and is_valid_image(videos[0]):
return [videos]
elif is_valid_image(_a):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}")
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =['pixel_values']
def __init__( self : Optional[Any] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {"shortest_edge": 256}
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE : str = get_size_dict(a , param_name="crop_size" )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop
SCREAMING_SNAKE_CASE : int = crop_size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : Tuple = offset
SCREAMING_SNAKE_CASE : str = do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self : Optional[Any] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(a , size["shortest_edge"] , default_to_square=a )
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE : Dict = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a , size=a , resample=a , data_format=a , **a )
def __UpperCamelCase ( self : List[str] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def __UpperCamelCase ( self : List[Any] , a : np.ndarray , a : Union[int, float] , a : bool = True , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = image.astype(np.floataa )
if offset:
SCREAMING_SNAKE_CASE : Union[str, Any] = image - (scale / 2)
return rescale(a , scale=a , data_format=a , **a )
def __UpperCamelCase ( self : int , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def __UpperCamelCase ( self : Tuple , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : List[str] = to_numpy_array(a )
if do_resize:
SCREAMING_SNAKE_CASE : Optional[Any] = self.resize(image=a , size=a , resample=a )
if do_center_crop:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.center_crop(a , size=a )
if do_rescale:
SCREAMING_SNAKE_CASE : Any = self.rescale(image=a , scale=a , offset=a )
if do_normalize:
SCREAMING_SNAKE_CASE : Tuple = self.normalize(image=a , mean=a , std=a )
SCREAMING_SNAKE_CASE : Optional[int] = to_channel_dimension_format(a , a )
return image
def __UpperCamelCase ( self : Dict , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Tuple , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : int = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = offset if offset is not None else self.offset
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : int = size if size is not None else self.size
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(a , param_name="crop_size" )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
SCREAMING_SNAKE_CASE : Optional[int] = make_batched(a )
SCREAMING_SNAKE_CASE : List[Any] = [
[
self._preprocess_image(
image=a , do_resize=a , size=a , resample=a , do_center_crop=a , crop_size=a , do_rescale=a , rescale_factor=a , offset=a , do_normalize=a , image_mean=a , image_std=a , data_format=a , )
for img in video
]
for video in videos
]
SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": videos}
return BatchFeature(data=a , tensor_type=a )
| 76
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a : Tuple = logging.get_logger(__name__)
a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a : List[Any] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a : Dict = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a : List[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
a : Optional[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
a : Optional[int] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
a : str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a : Union[str, Any] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a : List[Any] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a : Any = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a : str = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a : Optional[Any] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_lowerCAmelCase )
class _a :
def __call__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
elif titles is None or texts is None:
UpperCAmelCase_: str = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: int = titles if not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else [titles]
UpperCAmelCase_: List[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else [texts]
UpperCAmelCase_: str = len(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = questions if not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts.' )
UpperCAmelCase_: Dict = super().__call__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_ )["""input_ids"""]
UpperCAmelCase_: str = super().__call__(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_ )["""input_ids"""]
UpperCAmelCase_: Optional[Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
]
}
if return_attention_mask is not False:
UpperCAmelCase_: Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_: int = attention_mask
return self.pad(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 16, SCREAMING_SNAKE_CASE_ = 64, SCREAMING_SNAKE_CASE_ = 4, ) -> List[DPRSpanPrediction]:
UpperCAmelCase_: Tuple = reader_input["""input_ids"""]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = reader_output[:3]
UpperCAmelCase_: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = sorted(range(SCREAMING_SNAKE_CASE_ ), reverse=SCREAMING_SNAKE_CASE_, key=relevance_logits.__getitem__ )
UpperCAmelCase_: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_: Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_: Union[str, Any] = sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_: List[Any] = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_: Optional[int] = len(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=SCREAMING_SNAKE_CASE_, top_spans=SCREAMING_SNAKE_CASE_, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=SCREAMING_SNAKE_CASE_, start_index=SCREAMING_SNAKE_CASE_, end_index=SCREAMING_SNAKE_CASE_, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(SCREAMING_SNAKE_CASE_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> List[DPRSpanPrediction]:
UpperCAmelCase_: Optional[Any] = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase_: Tuple = sorted(SCREAMING_SNAKE_CASE_, key=lambda SCREAMING_SNAKE_CASE_ : x[1], reverse=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]' )
UpperCAmelCase_: int = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(SCREAMING_SNAKE_CASE_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCAmelCase )
class _a ( _lowerCAmelCase , _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = READER_PRETRAINED_VOCAB_FILES_MAP
A = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = READER_PRETRAINED_INIT_CONFIGURATION
A = ['''input_ids''', '''attention_mask''']
| 82
|
def lowerCAmelCase_ (lowerCAmelCase__: int , lowerCAmelCase__: float , lowerCAmelCase__: float ):
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def lowerCAmelCase_ (lowerCAmelCase__: float , lowerCAmelCase__: float , lowerCAmelCase__: float ):
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def lowerCAmelCase_ (lowerCAmelCase__: float , lowerCAmelCase__: float , lowerCAmelCase__: float ):
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def lowerCAmelCase_ (lowerCAmelCase__: float , lowerCAmelCase__: float , lowerCAmelCase__: float ):
"""simple docstring"""
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82
| 1
|
"""simple docstring"""
def lowercase__ ( snake_case_ :str ):
__UpperCAmelCase = len(snake_case_ )
while cur > 1:
# Find the maximum number in arr
__UpperCAmelCase = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(snake_case_ )]
# Reverse whole list
__UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(snake_case_ )]
cur -= 1
return arr
if __name__ == "__main__":
_lowercase : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
_lowercase : Dict = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 332
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , _lowercase : Optional[Any] ):
__UpperCAmelCase = str(id_ )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = []
__UpperCAmelCase = {} # {vertex:distance}
def __lt__( self : str , _lowercase : List[Any] ):
return self.key < other.key
def __repr__( self : int ):
return self.id
def a ( self : Union[str, Any] , _lowercase : int ):
self.neighbors.append(_lowercase )
def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : int ):
__UpperCAmelCase = weight
def lowercase__ ( snake_case_ :int , snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , snake_case_ )
graph[b - 1].add_edge(graph[a - 1] , snake_case_ )
def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ):
__UpperCAmelCase = []
for u in graph:
__UpperCAmelCase = math.inf
__UpperCAmelCase = None
__UpperCAmelCase = 0
__UpperCAmelCase = graph[:]
while q:
__UpperCAmelCase = min(snake_case_ )
q.remove(snake_case_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__UpperCAmelCase = u
__UpperCAmelCase = u.edges[v.id]
for i in range(1 , len(snake_case_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ):
for u in graph:
__UpperCAmelCase = math.inf
__UpperCAmelCase = None
__UpperCAmelCase = 0
__UpperCAmelCase = list(snake_case_ )
hq.heapify(snake_case_ )
while h:
__UpperCAmelCase = hq.heappop(snake_case_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__UpperCAmelCase = u
__UpperCAmelCase = u.edges[v.id]
hq.heapify(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowercase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332
| 1
|
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = '''docs/source/en/_toctree.yml'''
def snake_case_ ( snake_case ) -> Optional[int]:
lowercase__: List[Any] = defaultdict(a_ )
lowercase__: Union[str, Any] = []
lowercase__: Tuple = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(a_ )
lowercase__: Union[str, Any] = new_doc_list
lowercase__: str = [key for key, value in counts.items() if value > 1]
lowercase__: Optional[Any] = []
for duplicate_key in duplicates:
lowercase__: Union[str, Any] = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(a_ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
lowercase__: Tuple = sorted(a_ , key=lambda snake_case : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(a_ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(a_ )
# Sort
return overview_doc
def snake_case_ ( snake_case=False ) -> str:
with open(a_ , encoding='utf-8' ) as f:
lowercase__: Union[str, Any] = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__: str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__: Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
lowercase__: str = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowercase__: List[str] = api_doc[scheduler_idx]['sections']
lowercase__: Tuple = clean_doc_toc(a_ )
lowercase__: Union[str, Any] = False
if new_scheduler_doc != scheduler_doc:
lowercase__: Optional[Any] = True
if overwrite:
lowercase__: List[str] = new_scheduler_doc
if diff:
if overwrite:
lowercase__: Tuple = api_doc
with open(a_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(a_ , allow_unicode=a_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def snake_case_ ( snake_case=False ) -> Dict:
with open(a_ , encoding='utf-8' ) as f:
lowercase__: int = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__: int = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__: Dict = content[api_idx]['sections']
# Then to the model doc
lowercase__: Optional[int] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowercase__: Optional[int] = False
lowercase__: List[Any] = api_doc[pipeline_idx]['sections']
lowercase__: Optional[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowercase__: str = pipeline_doc['section']
lowercase__: List[Any] = clean_doc_toc(a_ )
if overwrite:
lowercase__: Any = new_sub_pipeline_doc
new_pipeline_docs.append(a_ )
# sort overall pipeline doc
lowercase__: List[str] = clean_doc_toc(a_ )
if new_pipeline_docs != pipeline_docs:
lowercase__: int = True
if overwrite:
lowercase__: Optional[int] = new_pipeline_docs
if diff:
if overwrite:
lowercase__: Tuple = api_doc
with open(a_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(a_ , allow_unicode=a_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__lowerCAmelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 364
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
__lowercase : Tuple = ['input_values', 'attention_mask']
def __init__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 16_000 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = False , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 16 , lowerCAmelCase__ = 64 , lowerCAmelCase__ = "hann_window" , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 7_600 , lowerCAmelCase__ = 1E-10 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: Dict = do_normalize
lowercase__: Optional[Any] = return_attention_mask
lowercase__: str = num_mel_bins
lowercase__: Dict = hop_length
lowercase__: Dict = win_length
lowercase__: Optional[int] = win_function
lowercase__: Any = frame_signal_scale
lowercase__: Tuple = fmin
lowercase__: Tuple = fmax
lowercase__: Dict = mel_floor
lowercase__: int = reduction_factor
lowercase__: List[Any] = win_length * sampling_rate // 1_000
lowercase__: Optional[Any] = hop_length * sampling_rate // 1_000
lowercase__: Optional[int] = optimal_fft_length(self.sample_size )
lowercase__: Optional[Any] = (self.n_fft // 2) + 1
lowercase__: str = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase__ )
lowercase__: Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowercase__: List[str] = np.array(lowerCAmelCase__ , np.intaa )
lowercase__: Tuple = []
for vector, length in zip(lowerCAmelCase__ , attention_mask.sum(-1 ) ):
lowercase__: int = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase__: Tuple = padding_value
normed_input_values.append(lowerCAmelCase__ )
else:
lowercase__: Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
lowercase__: List[str] = spectrogram(
lowerCAmelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
lowercase__: Dict = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
lowercase__: str = None
if audio_target is not None:
lowercase__: List[str] = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
if inputs is None:
return inputs_target
else:
lowercase__: int = inputs_target['input_values']
lowercase__: List[str] = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
lowercase__: Optional[int] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
lowercase__: int = isinstance(lowerCAmelCase__ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase__: Tuple = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__: Dict = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
lowercase__: Optional[Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowercase__: Optional[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__: Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
lowercase__: str = self.feature_size
# convert into correct format for padding
if is_target:
lowercase__: int = [self._extract_mel_features(lowerCAmelCase__ ) for waveform in speech]
lowercase__: Dict = BatchFeature({'input_values': features} )
lowercase__: Union[str, Any] = self.num_mel_bins
else:
lowercase__: Union[str, Any] = BatchFeature({'input_values': speech} )
lowercase__: Dict = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: List[str] = feature_size_hack
# convert input values to correct format
lowercase__: Union[str, Any] = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
lowercase__: List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCAmelCase__ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowercase__: Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCAmelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowercase__: Tuple = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowercase__: Tuple = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowercase__: str = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowercase__: Tuple = (
attention_mask
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase__: str = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=lowerCAmelCase__ , padding_value=self.padding_value )
if return_tensors is not None:
lowercase__: Union[str, Any] = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowercase__: int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowercase__: str = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 288
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
SCREAMING_SNAKE_CASE_: Tuple ={
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
SCREAMING_SNAKE_CASE_: Tuple ={
'allenai/longformer-base-4096': 40_96,
'allenai/longformer-large-4096': 40_96,
'allenai/longformer-large-4096-finetuned-triviaqa': 40_96,
'allenai/longformer-base-4096-extra.pos.embd.only': 40_96,
'allenai/longformer-large-4096-extra.pos.embd.only': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
UpperCAmelCase_ = bs[:]
UpperCAmelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = set()
UpperCAmelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ = char
return pairs
class __A ( UpperCamelCase__ ):
a__ : Dict = VOCAB_FILES_NAMES
a__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__(self : Any , __a : Optional[Any] , __a : Union[str, Any] , __a : List[str]="replace" , __a : List[str]="<s>" , __a : str="</s>" , __a : Dict="</s>" , __a : Tuple="<s>" , __a : Optional[Any]="<unk>" , __a : List[Any]="<pad>" , __a : Dict="<mask>" , __a : Any=False , **__a : int , ):
UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
with open(__a , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ = json.load(__a )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = errors # how to handle errors in decoding
UpperCAmelCase_ = bytes_to_unicode()
UpperCAmelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(__a , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase_ = {}
UpperCAmelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _lowercase (self : str ):
return len(self.encoder )
def _lowercase (self : Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase (self : Any , __a : Any ):
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ = tuple(__a )
UpperCAmelCase_ = get_pairs(__a )
if not pairs:
return token
while True:
UpperCAmelCase_ = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ = bigram
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while i < len(__a ):
try:
UpperCAmelCase_ = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ = tuple(__a )
UpperCAmelCase_ = new_word
if len(__a ) == 1:
break
else:
UpperCAmelCase_ = get_pairs(__a )
UpperCAmelCase_ = " ".join(__a )
UpperCAmelCase_ = word
return word
def _lowercase (self : Tuple , __a : Tuple ):
UpperCAmelCase_ = []
for token in re.findall(self.pat , __a ):
UpperCAmelCase_ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(" " ) )
return bpe_tokens
def _lowercase (self : Optional[Any] , __a : int ):
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def _lowercase (self : str , __a : Optional[Any] ):
return self.decoder.get(__a )
def _lowercase (self : Dict , __a : Dict ):
UpperCAmelCase_ = "".join(__a )
UpperCAmelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase (self : Union[str, Any] , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
UpperCAmelCase_ = 0
with open(__a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __a : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(" ".join(__a ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase (self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase (self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def _lowercase (self : int , __a : List[int] , __a : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase (self : List[Any] , __a : Tuple , __a : Optional[Any]=False , **__a : Dict ):
UpperCAmelCase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
UpperCAmelCase_ = " " + text
return (text, kwargs)
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCAmelCase__ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCAmelCase__ = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 339
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
a : Dict = HfArgumentParser(InitializationArguments)
a : Union[str, Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
a : int = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
a : Union[str, Any] = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
a : Dict = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
a : Tuple = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 368
|
"""simple docstring"""
from __future__ import annotations
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__=None ):
'''simple docstring'''
lowercase__ : Union[str, Any]= data
lowercase__ : Optional[Any]= None
def __repr__( self ):
'''simple docstring'''
lowercase__ : str= []
lowercase__ : Tuple= self
while temp:
string_rep.append(F'''{temp.data}''' )
lowercase__ : Optional[int]= temp.next
return "->".join(snake_case__ )
def lowercase__(A ) ->Dict:
"""simple docstring"""
if not elements_list:
raise Exception("The Elements List is empty" )
lowercase__ : Optional[int]= Node(elements_list[0] )
for i in range(1 , len(A ) ):
lowercase__ : Optional[Any]= Node(elements_list[i] )
lowercase__ : str= current.next
return head
def lowercase__(A ) ->None:
"""simple docstring"""
if head_node is not None and isinstance(A , A ):
print_reverse(head_node.next )
print(head_node.data )
def lowercase__() ->str:
"""simple docstring"""
from doctest import testmod
testmod()
lowercase__ : Optional[int]= make_linked_list([14, 52, 14, 12, 43] )
print("Linked List:" )
print(A )
print("Elements in Reverse:" )
print_reverse(A )
if __name__ == "__main__":
main()
| 150
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a__ : List[Any] = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ['MaskFormerFeatureExtractor']
a__ : Optional[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
a__ : int = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 80
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["ChineseCLIPFeatureExtractor"]
__lowerCamelCase = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 187
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCAmelCase = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _snake_case ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ) -> Dict:
"""simple docstring"""
return max(metric_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for gt in ground_truths )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = []
if args.gold_data_mode == "qa":
lowerCAmelCase = pd.read_csv(_SCREAMING_SNAKE_CASE , sep="""\t""" , header=_SCREAMING_SNAKE_CASE )
for answer_list in data[1]:
lowerCAmelCase = ast.literal_eval(_SCREAMING_SNAKE_CASE )
answers.append(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = [[reference] for reference in references]
lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = 0
for prediction, ground_truths in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
total += 1
em += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
fa += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = 100.0 * em / total
lowerCAmelCase = 100.0 * fa / total
logger.info(f'F1: {fa:.2f}' )
logger.info(f'EM: {em:.2f}' )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = args.k
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , """r""" ).readlines()]
lowerCAmelCase = lowerCAmelCase = 0
for hypo, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = set(hypo.split("""\t""" )[:k] )
lowerCAmelCase = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
lowerCAmelCase = 100.0 * em / total
logger.info(f'Precision@{k}: {em: .2f}' )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
def strip_title(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
if title.startswith("""\"""" ):
lowerCAmelCase = title[1:]
if title.endswith("""\"""" ):
lowerCAmelCase = title[:-1]
return title
lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device )
lowerCAmelCase = rag_model.rag.question_encoder(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = question_enc_outputs[0]
lowerCAmelCase = rag_model.retriever(
_SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
lowerCAmelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
lowerCAmelCase = []
for docs in all_docs:
lowerCAmelCase = [strip_title(_SCREAMING_SNAKE_CASE ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(_SCREAMING_SNAKE_CASE ) )
return provenance_strings
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inputs_dict.input_ids.to(args.device )
lowerCAmelCase = inputs_dict.attention_mask.to(args.device )
lowerCAmelCase = rag_model.generate( # rag_model overwrites generate
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
lowerCAmelCase = rag_model.retriever.generator_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
if args.print_predictions:
for q, a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info("""Q: {} - A: {}""".format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return answers
def _snake_case ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=_SCREAMING_SNAKE_CASE , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=_SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=_SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=_SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=_SCREAMING_SNAKE_CASE , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=_SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=_SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=_SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=_SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=_SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=_SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=_SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = {}
if args.model_type is None:
lowerCAmelCase = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
lowerCAmelCase = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
lowerCAmelCase = args.n_docs
if args.index_name is not None:
lowerCAmelCase = args.index_name
if args.index_path is not None:
lowerCAmelCase = args.index_path
else:
lowerCAmelCase = BartForConditionalGeneration
lowerCAmelCase = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
lowerCAmelCase = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(_SCREAMING_SNAKE_CASE ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
lowerCAmelCase = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , retriever=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
lowerCAmelCase = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
lowerCAmelCase = []
for line in tqdm(_SCREAMING_SNAKE_CASE ):
questions.append(line.strip() )
if len(_SCREAMING_SNAKE_CASE ) == args.eval_batch_size:
lowerCAmelCase = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(_SCREAMING_SNAKE_CASE ) + """\n""" )
preds_file.flush()
lowerCAmelCase = []
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(_SCREAMING_SNAKE_CASE ) )
preds_file.flush()
score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCAmelCase = get_args()
main(args)
| 187
| 1
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
A__ = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def _UpperCAmelCase ( snake_case = "mumbai" ):
"""simple docstring"""
_lowerCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
_lowerCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_lowerCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 82
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_lowerCAmelCase = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_snake_case ) , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_snake_case ) , x.transpose() ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = torch.tensor(_snake_case )
self.assertTrue(np.allclose(transpose(_snake_case ) , transpose(_snake_case ).numpy() ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
_lowerCAmelCase = torch.tensor(_snake_case )
self.assertTrue(np.allclose(transpose(_snake_case , axes=(1, 2, 0) ) , transpose(_snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = tf.constant(_snake_case )
self.assertTrue(np.allclose(transpose(_snake_case ) , transpose(_snake_case ).numpy() ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
_lowerCAmelCase = tf.constant(_snake_case )
self.assertTrue(np.allclose(transpose(_snake_case , axes=(1, 2, 0) ) , transpose(_snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = jnp.array(_snake_case )
self.assertTrue(np.allclose(transpose(_snake_case ) , np.asarray(transpose(_snake_case ) ) ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
_lowerCAmelCase = jnp.array(_snake_case )
self.assertTrue(np.allclose(transpose(_snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(_snake_case , axes=(1, 2, 0) ) ) ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_snake_case , (4, 3) ) , np.reshape(_snake_case , (4, 3) ) ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_snake_case , (12, 5) ) , np.reshape(_snake_case , (12, 5) ) ) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = torch.tensor(_snake_case )
self.assertTrue(np.allclose(reshape(_snake_case , (4, 3) ) , reshape(_snake_case , (4, 3) ).numpy() ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
_lowerCAmelCase = torch.tensor(_snake_case )
self.assertTrue(np.allclose(reshape(_snake_case , (12, 5) ) , reshape(_snake_case , (12, 5) ).numpy() ) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = tf.constant(_snake_case )
self.assertTrue(np.allclose(reshape(_snake_case , (4, 3) ) , reshape(_snake_case , (4, 3) ).numpy() ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
_lowerCAmelCase = tf.constant(_snake_case )
self.assertTrue(np.allclose(reshape(_snake_case , (12, 5) ) , reshape(_snake_case , (12, 5) ).numpy() ) )
@require_flax
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = jnp.array(_snake_case )
self.assertTrue(np.allclose(reshape(_snake_case , (4, 3) ) , np.asarray(reshape(_snake_case , (4, 3) ) ) ) )
_lowerCAmelCase = np.random.randn(3 , 4 , 5 )
_lowerCAmelCase = jnp.array(_snake_case )
self.assertTrue(np.allclose(reshape(_snake_case , (12, 5) ) , np.asarray(reshape(_snake_case , (12, 5) ) ) ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_snake_case ) , np.squeeze(_snake_case ) ) )
_lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_snake_case , axis=2 ) , np.squeeze(_snake_case , axis=2 ) ) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(1 , 3 , 4 )
_lowerCAmelCase = torch.tensor(_snake_case )
self.assertTrue(np.allclose(squeeze(_snake_case ) , squeeze(_snake_case ).numpy() ) )
_lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCAmelCase = torch.tensor(_snake_case )
self.assertTrue(np.allclose(squeeze(_snake_case , axis=2 ) , squeeze(_snake_case , axis=2 ).numpy() ) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(1 , 3 , 4 )
_lowerCAmelCase = tf.constant(_snake_case )
self.assertTrue(np.allclose(squeeze(_snake_case ) , squeeze(_snake_case ).numpy() ) )
_lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCAmelCase = tf.constant(_snake_case )
self.assertTrue(np.allclose(squeeze(_snake_case , axis=2 ) , squeeze(_snake_case , axis=2 ).numpy() ) )
@require_flax
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(1 , 3 , 4 )
_lowerCAmelCase = jnp.array(_snake_case )
self.assertTrue(np.allclose(squeeze(_snake_case ) , np.asarray(squeeze(_snake_case ) ) ) )
_lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCAmelCase = jnp.array(_snake_case )
self.assertTrue(np.allclose(squeeze(_snake_case , axis=2 ) , np.asarray(squeeze(_snake_case , axis=2 ) ) ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_snake_case , axis=1 ) , np.expand_dims(_snake_case , axis=1 ) ) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = torch.tensor(_snake_case )
self.assertTrue(np.allclose(expand_dims(_snake_case , axis=1 ) , expand_dims(_snake_case , axis=1 ).numpy() ) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = tf.constant(_snake_case )
self.assertTrue(np.allclose(expand_dims(_snake_case , axis=1 ) , expand_dims(_snake_case , axis=1 ).numpy() ) )
@require_flax
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = np.random.randn(3 , 4 )
_lowerCAmelCase = jnp.array(_snake_case )
self.assertTrue(np.allclose(expand_dims(_snake_case , axis=1 ) , np.asarray(expand_dims(_snake_case , axis=1 ) ) ) )
| 82
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Tuple = """▁"""
_lowercase : List[str] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Optional[int] = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
_lowercase : Any = {
"""facebook/xglm-564M""": 2048,
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : int = VOCAB_FILES_NAMES
__magic_name__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : str="<s>" , lowerCAmelCase : Optional[Any]="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : List[Any]="<s>" , lowerCAmelCase : Tuple="<unk>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase = 7
UpperCAmelCase = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCAmelCase )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def a__( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase ))
return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase ))
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def a__( self : int )-> Optional[int]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def a__( self : str )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[Any] , lowerCAmelCase : Optional[int] )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : int , lowerCAmelCase : int )-> Tuple:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : Optional[int] , lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = ''''''.join(lowerCAmelCase ).replace(lowerCAmelCase , ''' ''' ).strip()
return out_string
def a__( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 91
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowerCamelCase__ ( A : int , A : int , A : int , A : int , A : int , A : int ):
'''simple docstring'''
if (ksize % 2) == 0:
UpperCAmelCase = ksize + 1
UpperCAmelCase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(A ):
for x in range(A ):
# distance from center
UpperCAmelCase = x - ksize // 2
UpperCAmelCase = y - ksize // 2
# degree to radiant
UpperCAmelCase = theta / 1_80 * np.pi
UpperCAmelCase = np.cos(_theta )
UpperCAmelCase = np.sin(_theta )
# get kernel x
UpperCAmelCase = cos_theta * px + sin_theta * py
# get kernel y
UpperCAmelCase = -sin_theta * px + cos_theta * py
# fill kernel
UpperCAmelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_lowercase : Tuple = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
_lowercase : int = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_lowercase : List[str] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_lowercase : List[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_lowercase : Optional[int] = out / out.max() * 255
_lowercase : Optional[int] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 91
| 1
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray ) -> float:
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
def _snake_case( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray ) -> list[list[list[float] | float]]:
'''simple docstring'''
if dataset.ndim != value_array.ndim:
A__ = (
'Wrong input data\'s dimensions... '
f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
try:
if dataset.shape[1] != value_array.shape[1]:
A__ = (
'Wrong input data\'s shape... '
f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
A__ = (
'Input data have different datatype... '
f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
)
raise TypeError(SCREAMING_SNAKE_CASE__ )
A__ = []
for value in value_array:
A__ = euclidean(SCREAMING_SNAKE_CASE__ , dataset[0] )
A__ = dataset[0].tolist()
for dataset_value in dataset[1:]:
A__ = euclidean(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if dist > temp_dist:
A__ = temp_dist
A__ = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray ) -> float:
'''simple docstring'''
return np.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / (norm(SCREAMING_SNAKE_CASE__ ) * norm(SCREAMING_SNAKE_CASE__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'vocab_file': 'sentencepiece.model'}
UpperCAmelCase__ = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
UpperCAmelCase__ = {
'google/rembert': 256,
}
class lowerCAmelCase__ ( A_ ):
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Any=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int="[CLS]" , _lowerCamelCase : Optional[int]="[SEP]" , _lowerCamelCase : Optional[int]="[UNK]" , _lowerCamelCase : Optional[Any]="[SEP]" , _lowerCamelCase : str="[PAD]" , _lowerCamelCase : List[Any]="[CLS]" , _lowerCamelCase : Any="[MASK]" , **_lowerCamelCase : Optional[int] , ):
super().__init__(
do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , )
_snake_case = do_lower_case
_snake_case = remove_space
_snake_case = keep_accents
_snake_case = vocab_file
_snake_case = spm.SentencePieceProcessor()
self.sp_model.Load(_lowerCamelCase )
@property
def lowercase ( self : int ):
return len(self.sp_model )
def lowercase ( self : Any ):
_snake_case = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self : List[str] , _lowerCamelCase : Tuple ):
_snake_case = d
_snake_case = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def lowercase ( self : str , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=False ):
_snake_case = self.sp_model.EncodeAsPieces(_lowerCamelCase )
return pieces
def lowercase ( self : str , _lowerCamelCase : str ):
return self.sp_model.PieceToId(_lowerCamelCase )
def lowercase ( self : List[str] , _lowerCamelCase : int ):
return self.sp_model.IdToPiece(_lowerCamelCase )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : Any ):
_snake_case = self.sp_model.decode_pieces(_lowerCamelCase )
return out_string
def lowercase ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1]
def lowercase ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowerCamelCase ) )
return
_snake_case = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
return (out_vocab_file,)
| 288
| 0
|
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> int:
"""simple docstring"""
UpperCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = len(matrix[0] )
UpperCamelCase_ = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for row in range(SCREAMING_SNAKE_CASE_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase_ = True
for i in range(row + 1 , SCREAMING_SNAKE_CASE_ ):
if matrix[i][row] != 0:
UpperCamelCase_ , UpperCamelCase_ = matrix[i], matrix[row]
UpperCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE :int = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __magic_name__ :
UpperCamelCase_ :str = PegasusConfig
UpperCamelCase_ :List[str] = {}
UpperCamelCase_ :str = """gelu"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=False , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=20 , _lowercase=2 , _lowercase=1 , _lowercase=0 , )-> Tuple:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
UpperCamelCase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_ = np.concatenate([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase_ = prepare_pegasus_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Optional[Any]:
UpperCamelCase_ = 20
UpperCamelCase_ = model_class_name(_lowercase )
UpperCamelCase_ = model.encode(inputs_dict["input_ids"] )
UpperCamelCase_ , UpperCamelCase_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
UpperCamelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCamelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase_ = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
UpperCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCamelCase_ = model.decode(
decoder_input_ids[:, -1:] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowercase , )
UpperCamelCase_ = model.decode(_lowercase , _lowercase )
UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Tuple:
UpperCamelCase_ = 20
UpperCamelCase_ = model_class_name(_lowercase )
UpperCamelCase_ = model.encode(inputs_dict["input_ids"] )
UpperCamelCase_ , UpperCamelCase_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCamelCase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
UpperCamelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase_ = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
UpperCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCamelCase_ = model.decode(
decoder_input_ids[:, -1:] , _lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowercase , decoder_position_ids=_lowercase , )
UpperCamelCase_ = model.decode(_lowercase , _lowercase , decoder_attention_mask=_lowercase )
UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , )-> Tuple:
"""simple docstring"""
if attention_mask is None:
UpperCamelCase_ = np.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
UpperCamelCase_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :Dict = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
UpperCamelCase_ :Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
UpperCamelCase_ :List[str] = True
UpperCamelCase_ :Any = False
UpperCamelCase_ :Union[str, Any] = False
UpperCamelCase_ :Tuple = False
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = FlaxPegasusModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase )
def UpperCAmelCase_ ( self )-> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase )
UpperCamelCase_ = model_class(_lowercase )
@jax.jit
def encode_jitted(_lowercase , _lowercase=None , **_lowercase ):
return model.encode(input_ids=_lowercase , attention_mask=_lowercase )
with self.subTest("JIT Enabled" ):
UpperCamelCase_ = encode_jitted(**_lowercase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase_ = encode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCamelCase_ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(_lowercase , _lowercase , _lowercase ):
return model.decode(
decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , )
with self.subTest("JIT Enabled" ):
UpperCamelCase_ = decode_jitted(**_lowercase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase_ = decode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self )-> int:
for model_class_name in self.all_model_classes:
UpperCamelCase_ = model_class_name.from_pretrained("google/pegasus-large" , from_pt=_lowercase )
UpperCamelCase_ = np.ones((1, 1) )
UpperCamelCase_ = model(_lowercase )
self.assertIsNotNone(_lowercase )
@slow
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
UpperCamelCase_ = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
UpperCamelCase_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCamelCase_ = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
UpperCamelCase_ = tokenizer(_lowercase , return_tensors="np" , truncation=_lowercase , max_length=512 , padding=_lowercase )
UpperCamelCase_ = model.generate(**_lowercase , num_beams=2 ).sequences
UpperCamelCase_ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
assert tgt_text == decoded
| 60
| 0
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase__ : Dict = logging.getLogger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : List[Any] = """token-classification"""
def __init__( self : List[str] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
if type(lowerCAmelCase_ ) == dict:
_A: List[str] = Namespace(**lowerCAmelCase_ )
_A: Any = import_module('''tasks''' )
try:
_A: List[Any] = getattr(lowerCAmelCase_ , hparams.task_type )
_A: List[Any] = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
_A: Union[str, Any] = self.token_classification_task.get_labels(hparams.labels )
_A: List[Any] = CrossEntropyLoss().ignore_index
super().__init__(lowerCAmelCase_ , len(self.labels ) , self.mode )
def __magic_name__ ( self : Tuple , **lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
return self.model(**lowerCAmelCase_ )
def __magic_name__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[str] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
_A: Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
_A: Tuple = self(**lowerCAmelCase_ )
_A: List[str] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.hparams
for mode in ["train", "dev", "test"]:
_A: int = self._feature_file(lowerCAmelCase_ )
if os.path.exists(lowerCAmelCase_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , lowerCAmelCase_ )
_A: List[Any] = torch.load(lowerCAmelCase_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_A: Any = self.token_classification_task.read_examples_from_file(args.data_dir , lowerCAmelCase_ )
_A: Optional[Any] = self.token_classification_task.convert_examples_to_features(
lowerCAmelCase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowerCAmelCase_ , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , lowerCAmelCase_ )
torch.save(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] = False ):
"""simple docstring"""
_A: Dict = self._feature_file(lowerCAmelCase_ )
logger.info('''Loading features from cached file %s''' , lowerCAmelCase_ )
_A: List[Any] = torch.load(lowerCAmelCase_ )
_A: Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_A: int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
_A: Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
_A: str = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
_A: Union[str, Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , batch_size=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
"""Compute validation""" ""
_A: int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
_A: Dict = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
_A: Any = self(**lowerCAmelCase_ )
_A , _A: int = outputs[:2]
_A: Any = logits.detach().cpu().numpy()
_A: str = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __magic_name__ ( self : str , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: Any = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
_A: List[str] = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
_A: Optional[int] = np.argmax(lowerCAmelCase_ , axis=2 )
_A: str = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_A: int = dict(enumerate(self.labels ) )
_A: Dict = [[] for _ in range(out_label_ids.shape[0] )]
_A: int = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
_A: int = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(lowerCAmelCase_ , lowerCAmelCase_ ),
'''precision''': precision_score(lowerCAmelCase_ , lowerCAmelCase_ ),
'''recall''': recall_score(lowerCAmelCase_ , lowerCAmelCase_ ),
'''f1''': fa_score(lowerCAmelCase_ , lowerCAmelCase_ ),
}
_A: int = dict(results.items() )
_A: Optional[int] = results
return ret, preds_list, out_label_list
def __magic_name__ ( self : str , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A , _A , _A: str = self._eval_end(lowerCAmelCase_ )
_A: List[str] = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A , _A , _A: int = self._eval_end(lowerCAmelCase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
_A: Tuple = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __magic_name__ ( lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase_ , lowerCAmelCase_ )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=lowerCAmelCase_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=lowerCAmelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=lowerCAmelCase_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=lowerCAmelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
UpperCAmelCase__ : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCAmelCase__ : Optional[Any] = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase__ : int = parser.parse_args()
UpperCAmelCase__ : Tuple = NERTransformer(args)
UpperCAmelCase__ : Any = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCAmelCase__ : List[Any] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
UpperCAmelCase__ : Any = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 121
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Any ) -> Dict:
"""simple docstring"""
snake_case = XCLIPTextConfig()
# derive patch size from model name
snake_case = model_name.find('patch' )
snake_case = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
snake_case = XCLIPVisionConfig(patch_size=_UpperCamelCase , num_frames=_UpperCamelCase )
if "large" in model_name:
snake_case = 7_6_8
snake_case = 3_0_7_2
snake_case = 1_2
snake_case = 1_0_2_4
snake_case = 4_0_9_6
snake_case = 1_6
snake_case = 2_4
snake_case = 7_6_8
snake_case = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
snake_case = 3_3_6
snake_case = XCLIPConfig.from_text_vision_configs(_UpperCamelCase , _UpperCamelCase )
if "large" in model_name:
snake_case = 7_6_8
return config
def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> List[Any]:
"""simple docstring"""
if name == "token_embedding.weight":
snake_case = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
snake_case = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
snake_case = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
snake_case = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
snake_case = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
snake_case = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
snake_case = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
snake_case = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
snake_case = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
snake_case = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
snake_case = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
snake_case = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
snake_case = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
snake_case = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
snake_case = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
snake_case = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
snake_case = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
snake_case = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
snake_case = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case = orig_state_dict.pop(_UpperCamelCase )
if "attn.in_proj" in key:
snake_case = key.split('.' )
if key.startswith('visual' ):
snake_case = key_split[3]
snake_case = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case = val[
:dim, :
]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[
-dim:, :
]
else:
snake_case = val[
:dim
]
snake_case = val[
dim : dim * 2
]
snake_case = val[
-dim:
]
else:
if "weight" in key:
snake_case = val[
:dim, :
]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[
-dim:, :
]
else:
snake_case = val[:dim]
snake_case = val[
dim : dim * 2
]
snake_case = val[-dim:]
elif key.startswith('mit' ):
snake_case = key_split[2]
snake_case = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = val[:dim]
snake_case = val[dim : dim * 2]
snake_case = val[-dim:]
else:
snake_case = key_split[2]
snake_case = config.text_config.hidden_size
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[-dim:, :]
else:
snake_case = val[:dim]
snake_case = val[
dim : dim * 2
]
snake_case = val[-dim:]
else:
snake_case = rename_key(_UpperCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case = val.T
snake_case = val
return orig_state_dict
def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
snake_case = 'eating_spaghetti_8_frames.npy'
elif num_frames == 1_6:
snake_case = 'eating_spaghetti.npy'
elif num_frames == 3_2:
snake_case = 'eating_spaghetti_32_frames.npy'
snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=_UpperCamelCase , repo_type='dataset' , )
snake_case = np.load(_UpperCamelCase )
return list(_UpperCamelCase )
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Tuple=None , _UpperCamelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
snake_case = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
snake_case = model_to_url[model_name]
snake_case = 8
if "16-frames" in model_name:
snake_case = 1_6
elif "shot" in model_name:
snake_case = 3_2
snake_case = get_xclip_config(_UpperCamelCase , _UpperCamelCase )
snake_case = XCLIPModel(_UpperCamelCase )
model.eval()
if "drive" in checkpoint_url:
snake_case = 'pytorch_model.bin'
gdown.cached_download(_UpperCamelCase , _UpperCamelCase , quiet=_UpperCamelCase )
snake_case = torch.load(_UpperCamelCase , map_location='cpu' )['model']
else:
snake_case = torch.hub.load_state_dict_from_url(_UpperCamelCase )['model']
snake_case = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
snake_case = XCLIPModel(_UpperCamelCase )
snake_case ,snake_case = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case = 3_3_6 if model_name == 'xclip-large-patch14-16-frames' else 2_2_4
snake_case = VideoMAEImageProcessor(size=_UpperCamelCase )
snake_case = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
snake_case = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
snake_case = XCLIPProcessor(image_processor=_UpperCamelCase , tokenizer=_UpperCamelCase )
snake_case = prepare_video(_UpperCamelCase )
snake_case = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=_UpperCamelCase , return_tensors='pt' , padding=_UpperCamelCase )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
snake_case = model(**_UpperCamelCase )
# Verify outputs
snake_case = outputs.logits_per_video
snake_case = logits_per_video.softmax(dim=1 )
print('Probs:' , _UpperCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
snake_case = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
snake_case = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(_UpperCamelCase , organization='nielsr' )
processor.push_to_hub(_UpperCamelCase , organization='nielsr' )
slow_tokenizer.push_to_hub(_UpperCamelCase , organization='nielsr' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 150
| 0
|
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def __lowerCAmelCase (__lowerCAmelCase ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 362
|
'''simple docstring'''
from collections.abc import Sequence
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return sum(c * (x**i) for i, c in enumerate(__lowerCAmelCase ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = 0.0
for coeff in reversed(__lowerCAmelCase ):
_UpperCAmelCase : int = result * x + coeff
return result
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 322
| 0
|
lowercase__ : Optional[Any] = 0 # The first color of the flag.
lowercase__ : List[Any] = 1 # The second color of the flag.
lowercase__ : Dict = 2 # The third color of the flag.
lowercase__ : Optional[Any] = (red, white, blue)
def lowerCamelCase__ ( _A ):
'''simple docstring'''
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
snake_case_ = 0
snake_case_ = len(_A ) - 1
snake_case_ = 0
while mid <= high:
if sequence[mid] == colors[0]:
snake_case_ , snake_case_ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
snake_case_ , snake_case_ = sequence[high], sequence[mid]
high -= 1
else:
snake_case_ = f"The elements inside the sequence must contains only {colors} values"
raise ValueError(_A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : Optional[Any] = input("Enter numbers separated by commas:\n").strip()
lowercase__ : List[str] = [int(item.strip()) for item in user_input.split(",")]
print(f'''{dutch_national_flag_sort(unsorted)}''')
| 187
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowercase__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__lowercase )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__lowercase )
def snake_case__ ( self : Optional[int] , **__lowercase : Optional[Any] ):
"""simple docstring"""
snake_case_ = {}
snake_case_ = {}
snake_case_ = {}
# preprocess args
if "points_per_batch" in kwargs:
snake_case_ = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
snake_case_ = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
snake_case_ = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
snake_case_ = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
snake_case_ = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
snake_case_ = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
snake_case_ = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
snake_case_ = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
snake_case_ = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
snake_case_ = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
snake_case_ = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
snake_case_ = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Optional[int] , __lowercase : List[str] , *__lowercase : Optional[Any] , __lowercase : Dict=None , __lowercase : List[str]=None , **__lowercase : Optional[Any] ):
"""simple docstring"""
return super().__call__(__lowercase , *__lowercase , num_workers=__lowercase , batch_size=__lowercase , **__lowercase )
def snake_case__ ( self : str , __lowercase : int , __lowercase : List[str]=64 , __lowercase : int = 0 , __lowercase : float = 5_12 / 15_00 , __lowercase : Optional[int] = 32 , __lowercase : Optional[int] = 1 , ):
"""simple docstring"""
snake_case_ = load_image(__lowercase )
snake_case_ = self.image_processor.size["longest_edge"]
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.image_processor.generate_crop_boxes(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
snake_case_ = self.image_processor(images=__lowercase , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
snake_case_ = self.get_inference_context()
with inference_context():
snake_case_ = self._ensure_tensor_on_device(__lowercase , device=self.device )
snake_case_ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
snake_case_ = image_embeddings
snake_case_ = grid_points.shape[1]
snake_case_ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , __lowercase , __lowercase ):
snake_case_ = grid_points[:, i : i + points_per_batch, :, :]
snake_case_ = input_labels[:, i : i + points_per_batch]
snake_case_ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case__ ( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Union[str, Any]=0.88 , __lowercase : Union[str, Any]=0.95 , __lowercase : int=0 , __lowercase : int=1 , ):
"""simple docstring"""
snake_case_ = model_inputs.pop("input_boxes" )
snake_case_ = model_inputs.pop("is_last" )
snake_case_ = model_inputs.pop("original_sizes" ).tolist()
snake_case_ = model_inputs.pop("reshaped_input_sizes" ).tolist()
snake_case_ = self.model(**__lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
snake_case_ = model_outputs["pred_masks"]
snake_case_ = self.image_processor.post_process_masks(
__lowercase , __lowercase , __lowercase , __lowercase , binarize=__lowercase )
snake_case_ = model_outputs["iou_scores"]
snake_case_ , snake_case_ , snake_case_ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __lowercase , __lowercase , __lowercase , __lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case__ ( self : str , __lowercase : Any , __lowercase : Optional[int]=False , __lowercase : int=False , __lowercase : List[str]=0.7 , ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
snake_case_ = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
snake_case_ = torch.cat(__lowercase )
snake_case_ = torch.cat(__lowercase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.image_processor.post_process_for_mask_generation(
__lowercase , __lowercase , __lowercase , __lowercase )
snake_case_ = defaultdict(__lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__lowercase )
snake_case_ = {}
if output_rle_mask:
snake_case_ = rle_mask
if output_bboxes_mask:
snake_case_ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 187
| 1
|
import string
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = ''
for i in sequence:
lowercase__ = ord(_SCREAMING_SNAKE_CASE )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = string.ascii_letters
lowercase__ = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_SCREAMING_SNAKE_CASE )] if c in letters else c for c in sequence )
def __UpperCamelCase () -> None:
from timeit import timeit
print('Running performance benchmarks...' )
lowercase__ = 'from string import printable ; from __main__ import atbash, atbash_slow'
print(F"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=_SCREAMING_SNAKE_CASE )} seconds""" )
print(F"""> atbash(): {timeit("atbash(printable)" , setup=_SCREAMING_SNAKE_CASE )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 269
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __UpperCamelCase () -> str:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowercase__ = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , _SCREAMING_SNAKE_CASE ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __UpperCamelCase () -> Any:
assert _test_patching.open is open
lowercase__ = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , _SCREAMING_SNAKE_CASE ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def __UpperCamelCase () -> List[str]:
# pandas.read_csv is not present in _test_patching
lowercase__ = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , _SCREAMING_SNAKE_CASE ):
pass
def __UpperCamelCase () -> List[str]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
lowercase__ = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , _SCREAMING_SNAKE_CASE ) is None
with patch_submodule(_test_patching , 'len' , _SCREAMING_SNAKE_CASE ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __UpperCamelCase () -> List[str]:
lowercase__ = '__test_patch_submodule_start_and_stop_mock__'
lowercase__ = patch_submodule(_test_patching , 'open' , _SCREAMING_SNAKE_CASE )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __UpperCamelCase () -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowercase__ = '__test_patch_submodule_successive_join__'
lowercase__ = '__test_patch_submodule_successive_dirname__'
lowercase__ = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , _SCREAMING_SNAKE_CASE ):
with patch_submodule(_test_patching , 'os.rename' , _SCREAMING_SNAKE_CASE ):
with patch_submodule(_test_patching , 'os.path.dirname' , _SCREAMING_SNAKE_CASE ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , _SCREAMING_SNAKE_CASE ):
with patch_submodule(_test_patching , 'os.path.join' , _SCREAMING_SNAKE_CASE ):
with patch_submodule(_test_patching , 'os.path.dirname' , _SCREAMING_SNAKE_CASE ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __UpperCamelCase () -> Optional[Any]:
lowercase__ = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , _SCREAMING_SNAKE_CASE ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , _SCREAMING_SNAKE_CASE ):
pass
| 269
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 91
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "openai-gpt"
__UpperCamelCase = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , lowercase_ : List[str]=40478 , lowercase_ : List[str]=512 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : int=0.02 , lowercase_ : Optional[int]="cls_index" , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = n_positions
SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd
SCREAMING_SNAKE_CASE_ : Dict = n_layer
SCREAMING_SNAKE_CASE_ : Any = n_head
SCREAMING_SNAKE_CASE_ : Union[str, Any] = afn
SCREAMING_SNAKE_CASE_ : int = resid_pdrop
SCREAMING_SNAKE_CASE_ : List[str] = embd_pdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = summary_type
SCREAMING_SNAKE_CASE_ : Tuple = summary_use_proj
SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_activation
SCREAMING_SNAKE_CASE_ : Any = summary_first_dropout
SCREAMING_SNAKE_CASE_ : List[str] = summary_proj_to_labels
super().__init__(**lowercase_)
| 91
| 1
|
from __future__ import annotations
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return [ord(SCREAMING_SNAKE_CASE_ ) - 9_6 for elem in plain]
def a ( SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
return "".join(chr(elem + 9_6 ) for elem in encoded )
def a ( ):
"""simple docstring"""
UpperCamelCase : Dict = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , SCREAMING_SNAKE_CASE_ )
print('''Decoded:''' , decode(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
main()
| 315
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def a ( ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print('''Processing...''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for index, image in enumerate(SCREAMING_SNAKE_CASE_ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCamelCase : Optional[int] = random_chars(3_2 )
UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" )
UpperCamelCase : Any = []
for anno in new_annos[index]:
UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(SCREAMING_SNAKE_CASE_ )
with open(F"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Any = []
UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''*.txt''' ) ):
UpperCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE_ ) as in_file:
UpperCamelCase : List[str] = in_file.readlines()
UpperCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{label_name}.jpg""" )
UpperCamelCase : Union[str, Any] = []
for obj_list in obj_lists:
UpperCamelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE_ )
labels.append(SCREAMING_SNAKE_CASE_ )
return img_paths, labels
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 1 ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
UpperCamelCase : str = []
UpperCamelCase : int = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : Tuple = []
UpperCamelCase : Optional[int] = img_list[idx]
path_list.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = anno_list[idx]
UpperCamelCase : Optional[Any] = cva.imread(SCREAMING_SNAKE_CASE_ )
if flip_type == 1:
UpperCamelCase : Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCamelCase : List[str] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(SCREAMING_SNAKE_CASE_ )
new_imgs_list.append(SCREAMING_SNAKE_CASE_ )
return new_imgs_list, new_annos_lists, path_list
def a ( SCREAMING_SNAKE_CASE_ : int = 3_2 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
UpperCamelCase : Any = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 315
| 1
|
'''simple docstring'''
def __magic_name__( lowerCamelCase):
if not isinstance(_snake_case, _snake_case):
raise TypeError('''only integers accepted as input''')
else:
__lowerCAmelCase = str(abs(_snake_case))
__lowerCAmelCase = [list(_snake_case) for char in range(len(_snake_case))]
for index in range(len(_snake_case)):
num_transpositions[index].pop(_snake_case)
return max(
int(''''''.join(list(_snake_case))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 174
|
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case__ : Optional[Any] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
snake_case__ : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case__ : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
snake_case__ : Optional[int] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
snake_case__ : int = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : Dict = None
# source code of `config_class`
lowerCAmelCase : Union[str, Any] = inspect.getsource(_snake_case )
lowerCAmelCase : List[Any] = _re_checkpoint.findall(_snake_case )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase : Optional[int] = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase : List[str] = ckpt_name
break
return checkpoint
def _snake_case ( ):
lowerCAmelCase : List[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase : int = get_checkpoint_from_config_class(_snake_case )
lowerCAmelCase : int = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_snake_case )
if len(_snake_case ) > 0:
lowerCAmelCase : Dict = '''\n'''.join(sorted(_snake_case ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 60
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Dict = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
|
from scipy.stats import pearsonr
import datasets
snake_case : Tuple = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
snake_case : Dict = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
snake_case : int = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a=False ):
if return_pvalue:
__magic_name__ : Union[str, Any] = pearsonr(_a , _a )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(_a , _a )[0] )}
| 41
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=3 , UpperCamelCase__=224 , UpperCamelCase__=30 , UpperCamelCase__=400 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> str:
lowerCamelCase : int = size if size is not None else {"height": 18, "width": 18}
lowerCamelCase : int = parent
lowerCamelCase : str = batch_size
lowerCamelCase : List[Any] = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Optional[int] = min_resolution
lowerCamelCase : Dict = max_resolution
lowerCamelCase : Tuple = do_resize
lowerCamelCase : Tuple = size
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Any = image_mean
lowerCamelCase : int = image_std
def _lowercase ( self ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : List[Any] = EfficientFormerImageProcessorTester(self )
@property
def _lowercase ( self ) -> Tuple:
return self.image_proc_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def _lowercase ( self ) -> List[Any]:
pass
def _lowercase ( self ) -> List[str]:
# Initialize image_processor
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
lowerCamelCase : Dict = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
lowerCamelCase : Union[str, Any] = image_processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _lowercase ( self ) -> str:
# Initialize image_processor
lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
lowerCamelCase : List[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
lowerCamelCase : Tuple = image_processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _lowercase ( self ) -> int:
# Initialize image_processor
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
lowerCamelCase : Tuple = image_processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 48
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ) -> Union[str, Any]:
super().__init__()
__lowerCAmelCase: Optional[Any] = initial_learning_rate
__lowerCAmelCase: str = warmup_steps
__lowerCAmelCase: Optional[int] = power
__lowerCAmelCase: str = decay_schedule_fn
__lowerCAmelCase: Tuple = name
def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[int]:
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__lowerCAmelCase: List[str] = tf.cast(UpperCAmelCase , tf.floataa )
__lowerCAmelCase: Tuple = tf.cast(self.warmup_steps , tf.floataa )
__lowerCAmelCase: List[str] = global_step_float / warmup_steps_float
__lowerCAmelCase: List[str] = self.initial_learning_rate * tf.math.pow(UpperCAmelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCAmelCase , )
def UpperCAmelCase ( self : Tuple ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 0.9 , SCREAMING_SNAKE_CASE : float = 0.9_9_9 , SCREAMING_SNAKE_CASE : float = 1E-8 , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=SCREAMING_SNAKE_CASE , )
if num_warmup_steps:
__lowerCAmelCase: Optional[int] = WarmUp(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_schedule_fn=SCREAMING_SNAKE_CASE , warmup_steps=SCREAMING_SNAKE_CASE , )
if weight_decay_rate > 0.0:
__lowerCAmelCase: List[Any] = AdamWeightDecay(
learning_rate=SCREAMING_SNAKE_CASE , weight_decay_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase: Dict = tf.keras.optimizers.Adam(
learning_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.999 , UpperCAmelCase : float = 1E-7 , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "AdamWeightDecay" , **UpperCAmelCase : str , ) -> int:
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[Any] = weight_decay_rate
__lowerCAmelCase: List[str] = include_in_weight_decay
__lowerCAmelCase: Optional[Any] = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Tuple ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = {'WarmUp': WarmUp}
return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]:
__lowerCAmelCase: Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase: Tuple = list(zip(*UpperCAmelCase ) )
return super(UpperCAmelCase , self ).apply_gradients(zip(UpperCAmelCase , UpperCAmelCase ) , name=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCAmelCase: Dict = apply_state or {}
__lowerCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCAmelCase: str = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=None ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase: Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: str = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: List[str] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return False
return True
class A_ ( snake_case__ ):
def __init__( self : int ) -> List[Any]:
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: int = None
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
if self._accum_steps is None:
__lowerCAmelCase: List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCAmelCase : Any ) -> Any:
if not self._gradients:
__lowerCAmelCase: Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCAmelCase )
self._accum_steps.assign_add(1 )
def UpperCAmelCase ( self : int ) -> int:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCAmelCase ) )
| 322
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
'''simple docstring'''
a__ ="""bloom"""
a__ =["""past_key_values"""]
a__ ={
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self , A=2_5_0_8_8_0 , A=6_4 , A=2 , A=8 , A=1E-5 , A=0.02 , A=True , A=1 , A=2 , A=False , A=0.0 , A=0.0 , A=1 , A=False , **A , ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : Dict = n_layer
_UpperCAmelCase : int = n_head
_UpperCAmelCase : str = layer_norm_epsilon
_UpperCAmelCase : str = initializer_range
_UpperCAmelCase : List[str] = use_cache
_UpperCAmelCase : List[Any] = pretraining_tp
_UpperCAmelCase : Union[str, Any] = apply_residual_connection_post_layernorm
_UpperCAmelCase : List[Any] = hidden_dropout
_UpperCAmelCase : List[str] = attention_dropout
_UpperCAmelCase : int = bos_token_id
_UpperCAmelCase : Dict = eos_token_id
_UpperCAmelCase : str = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
'''simple docstring'''
a__ =version.parse('''1.12''' )
def __init__( self , A , A = "default" , A = None , A = False , ) -> str:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
_UpperCAmelCase : int = 0
@property
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowerCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return self._config.n_head
@property
def __lowerCAmelCase ( self ) -> int:
return 1E-3
def __lowerCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Dict:
_UpperCAmelCase : List[str] = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
_UpperCAmelCase : Tuple = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase : Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_UpperCAmelCase : Tuple = seqlen + 2
_UpperCAmelCase : Optional[Any] = self._config.hidden_size // self.num_attention_heads
_UpperCAmelCase : Tuple = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_UpperCAmelCase : int = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_UpperCAmelCase : List[Any] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
_UpperCAmelCase : Dict = common_inputs['''attention_mask''']
if self.use_past:
_UpperCAmelCase : List[str] = ordered_inputs['''attention_mask'''].dtype
_UpperCAmelCase : List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def __lowerCAmelCase ( self ) -> int:
return 1_3
| 352
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''WhisperFeatureExtractor'''
a__ ='''WhisperTokenizer'''
def __init__( self , A , A ) -> Any:
super().__init__(A , A )
_UpperCAmelCase : int = self.feature_extractor
_UpperCAmelCase : List[str] = False
def __lowerCAmelCase ( self , A=None , A=None , A=True ) -> Optional[int]:
return self.tokenizer.get_decoder_prompt_ids(task=A , language=A , no_timestamps=A )
def __call__( self , *A , **A ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A , **A )
_UpperCAmelCase : str = kwargs.pop('''audio''' , A )
_UpperCAmelCase : Dict = kwargs.pop('''sampling_rate''' , A )
_UpperCAmelCase : Dict = kwargs.pop('''text''' , A )
if len(A ) > 0:
_UpperCAmelCase : List[Any] = args[0]
_UpperCAmelCase : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_UpperCAmelCase : Optional[Any] = self.feature_extractor(A , *A , sampling_rate=A , **A )
if text is not None:
_UpperCAmelCase : Any = self.tokenizer(A , **A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_UpperCAmelCase : int = encodings['''input_ids''']
return inputs
def __lowerCAmelCase ( self , *A , **A ) -> Optional[Any]:
return self.tokenizer.batch_decode(*A , **A )
def __lowerCAmelCase ( self , *A , **A ) -> Any:
return self.tokenizer.decode(*A , **A )
def __lowerCAmelCase ( self , A , A="np" ) -> Any:
return self.tokenizer.get_prompt_ids(A , return_tensors=A )
| 68
| 0
|
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__snake_case : Tuple = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def _lowercase ( __snake_case ,__snake_case ) -> Tuple:
return torch.atana(__snake_case ,__snake_case ) / math.pi * 2
def _lowercase ( __snake_case ) -> Any:
__lowerCAmelCase : Any = torch.sin(t * math.pi / 2 ) ** 2
__lowerCAmelCase : Optional[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(__snake_case ,__snake_case )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any]) -> List[Any]:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : int = DiffusionAttnUnetaD(_SCREAMING_SNAKE_CASE , n_attn_layers=4)
__lowerCAmelCase : Union[str, Any] = deepcopy(self.diffusion)
__lowerCAmelCase : Any = torch.quasirandom.SobolEngine(1 , scramble=_SCREAMING_SNAKE_CASE)
def _lowercase ( __snake_case ) -> Union[str, Any]:
__lowerCAmelCase : Union[str, Any] = MODELS_MAP[model_name]["url"]
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
__snake_case : Tuple = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
__snake_case : int = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
__snake_case : Dict = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
__snake_case : List[Any] = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
__snake_case : Any = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
__snake_case : Union[str, Any] = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def _lowercase ( __snake_case ) -> Tuple:
if name.startswith("skip" ):
return name.replace("skip" ,RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] ,RES_CONV_MAP[name[:6]] )
def _lowercase ( __snake_case ) -> int:
for key, value in ATTN_MAP.items():
if name.startswith(__snake_case ) and not isinstance(__snake_case ,__snake_case ):
return name.replace(__snake_case ,__snake_case )
elif name.startswith(__snake_case ):
return [name.replace(__snake_case ,__snake_case ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def _lowercase ( __snake_case ,__snake_case=13 ) -> str:
__lowerCAmelCase : Optional[int] = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" ,"time_proj" )
__lowerCAmelCase : List[Any] = 0
if string.startswith("net.3." ):
depth += 1
__lowerCAmelCase : List[Any] = string[6:]
elif string.startswith("net." ):
__lowerCAmelCase : Optional[Any] = string[4:]
while string.startswith("main.7." ):
depth += 1
__lowerCAmelCase : Dict = string[7:]
if string.startswith("main." ):
__lowerCAmelCase : str = string[5:]
# mid block
if string[:2].isdigit():
__lowerCAmelCase : int = string[:2]
__lowerCAmelCase : Dict = string[2:]
else:
__lowerCAmelCase : Tuple = string[0]
__lowerCAmelCase : Any = string[1:]
if depth == max_depth:
__lowerCAmelCase : Dict = MID_NUM_TO_LAYER[layer_num]
__lowerCAmelCase : Any = "mid_block"
elif depth > 0 and int(__snake_case ) < 7:
__lowerCAmelCase : int = DOWN_NUM_TO_LAYER[layer_num]
__lowerCAmelCase : Tuple = F"""down_blocks.{depth}"""
elif depth > 0 and int(__snake_case ) > 7:
__lowerCAmelCase : str = UP_NUM_TO_LAYER[layer_num]
__lowerCAmelCase : Union[str, Any] = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
__lowerCAmelCase : Tuple = DEPTH_0_TO_LAYER[layer_num]
__lowerCAmelCase : Any = F"""up_blocks.{max_depth - 1}""" if int(__snake_case ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
__lowerCAmelCase : Tuple = string_left[1:]
if "resnets" in new_layer:
__lowerCAmelCase : int = convert_resconv_naming(__snake_case )
elif "attentions" in new_layer:
__lowerCAmelCase : Any = convert_attn_naming(__snake_case )
__lowerCAmelCase : List[str] = new_string_left
if not isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : List[str] = prefix + "." + new_layer + "." + string_left
else:
__lowerCAmelCase : Union[str, Any] = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def _lowercase ( __snake_case ) -> List[str]:
__lowerCAmelCase : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers, don't have trainable weights
continue
__lowerCAmelCase : str = rename(__snake_case )
# check if we need to transform from Conv => Linear for attention
if isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : List[str] = transform_conv_attns(__snake_case ,__snake_case ,__snake_case )
else:
__lowerCAmelCase : List[Any] = v
return new_state_dict
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
if len(__snake_case ) == 1:
if len(v.shape ) == 3:
# weight
__lowerCAmelCase : List[str] = v[:, :, 0]
else:
# bias
__lowerCAmelCase : str = v
else:
# qkv matrices
__lowerCAmelCase : Optional[int] = v.shape[0]
__lowerCAmelCase : Any = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__lowerCAmelCase : Tuple = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__lowerCAmelCase : int = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def _lowercase ( __snake_case ) -> Tuple:
__lowerCAmelCase : int = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
__lowerCAmelCase : Dict = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
__lowerCAmelCase : int = download(__snake_case )
__lowerCAmelCase : Dict = MODELS_MAP[model_name]["sample_rate"]
__lowerCAmelCase : Optional[Any] = MODELS_MAP[model_name]["sample_size"]
__lowerCAmelCase : int = Object()
__lowerCAmelCase : str = sample_size
__lowerCAmelCase : str = sample_rate
__lowerCAmelCase : str = 0
__lowerCAmelCase : Dict = UNetaDModel(sample_size=__snake_case ,sample_rate=__snake_case )
__lowerCAmelCase : List[Any] = diffusers_model.state_dict()
__lowerCAmelCase : Union[str, Any] = DiffusionUncond(__snake_case )
orig_model.load_state_dict(torch.load(args.model_path ,map_location=__snake_case )["state_dict"] )
__lowerCAmelCase : Any = orig_model.diffusion_ema.eval()
__lowerCAmelCase : Any = orig_model.state_dict()
__lowerCAmelCase : int = rename_orig_weights(__snake_case )
__lowerCAmelCase : List[Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__lowerCAmelCase : Union[str, Any] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(__snake_case ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith("kernel" ) for k in list(__snake_case ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
__lowerCAmelCase : int = value.squeeze()
__lowerCAmelCase : Dict = value
diffusers_model.load_state_dict(__snake_case )
__lowerCAmelCase : List[str] = 100
__lowerCAmelCase : Dict = 33
__lowerCAmelCase : Tuple = IPNDMScheduler(num_train_timesteps=__snake_case )
__lowerCAmelCase : Union[str, Any] = torch.manual_seed(__snake_case )
__lowerCAmelCase : int = torch.randn([1, 2, config.sample_size] ,generator=__snake_case ).to(__snake_case )
__lowerCAmelCase : List[str] = torch.linspace(1 ,0 ,steps + 1 ,device=__snake_case )[:-1]
__lowerCAmelCase : Optional[int] = get_crash_schedule(__snake_case )
__lowerCAmelCase : List[str] = DanceDiffusionPipeline(unet=__snake_case ,scheduler=__snake_case )
__lowerCAmelCase : Any = torch.manual_seed(33 )
__lowerCAmelCase : Any = pipe(num_inference_steps=__snake_case ,generator=__snake_case ).audios
__lowerCAmelCase : str = sampling.iplms_sample(__snake_case ,__snake_case ,__snake_case ,{} )
__lowerCAmelCase : int = generated.clamp(-1 ,1 )
__lowerCAmelCase : Any = (generated - audio).abs().sum()
__lowerCAmelCase : List[str] = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" ,__snake_case )
print("Diff max" ,__snake_case )
assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__snake_case : List[str] = parser.parse_args()
main(args)
| 269
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small")
__lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="np").input_ids
__lowerCAmelCase : Dict = tokenizer("Hi I am" , return_tensors="np").input_ids
__lowerCAmelCase : str = shift_tokens_right(_SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id)
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE).logits
__lowerCAmelCase : int = optax.softmax_cross_entropy(_SCREAMING_SNAKE_CASE , onehot(_SCREAMING_SNAKE_CASE , logits.shape[-1])).mean()
__lowerCAmelCase : List[str] = -(labels.shape[-1] * loss.item())
__lowerCAmelCase : str = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 269
| 1
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 319
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
snake_case = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" )
return sd
def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = OrderedDict()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE : Optional[Any] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = "pretraining"
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512}
SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048}
SCREAMING_SNAKE_CASE : Any = "vqa_advanced"
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129}
SCREAMING_SNAKE_CASE : Tuple = "vqa"
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr"
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase )
# Load State Dict
SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 319
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : int = MBartConfig
UpperCAmelCase : Any = {}
UpperCAmelCase : Tuple = '''gelu'''
def __init__( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : int=99 , _UpperCAmelCase : Union[str, Any]=32 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Union[str, Any]=37 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=20 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Any=0 , ):
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = eos_token_id
_A = pad_token_id
_A = bos_token_id
def lowerCAmelCase_ ( self : Tuple ):
_A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A = tf.concat([input_ids, eos_tensor] , axis=1 )
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ):
_A = TFMBartModel(config=_UpperCAmelCase ).get_decoder()
_A = inputs_dict['input_ids']
_A = input_ids[:1, :]
_A = inputs_dict['attention_mask'][:1, :]
_A = inputs_dict['head_mask']
_A = 1
# first forward pass
_A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
_A , _A = outputs.to_tuple()
_A = past_key_values[1]
def _snake_case ( _snake_case : str , _snake_case : List[str] , _snake_case : str , _snake_case : Dict=None , _snake_case : Optional[int]=None , _snake_case : List[str]=None , _snake_case : str=None , _snake_case : Optional[int]=None , ) -> Tuple:
'''simple docstring'''
if attention_mask is None:
_A = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCAmelCase : Union[str, Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase : Tuple = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Union[str, Any] = False
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCAmelCase_ ( self : str ):
_A = TFMBartModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Dict ):
_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
UpperCAmelCase : str = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
UpperCAmelCase : Dict = '''facebook/mbart-large-en-ro'''
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
_A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCAmelCase_ ( self : Union[str, Any] , **_UpperCAmelCase : Any ):
_A = self.translate_src_text(**_UpperCAmelCase )
self.assertListEqual(self.expected_text , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , **_UpperCAmelCase : List[Any] ):
_A = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' )
_A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_A = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def lowerCAmelCase_ ( self : Any ):
self._assert_generated_batch_equal_expected()
| 315
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
return "".join(sorted(_snake_case ) )
def _snake_case ( _snake_case : str ) -> list[str]:
'''simple docstring'''
return word_by_signature[signature(_snake_case )]
a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
a = sorted({word.strip().lower() for word in data.splitlines()})
a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 315
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=13, A=7, A=True, A=True, A=True, A=True, A=99, A=32, A=5, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=4, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_attention_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : int = num_choices
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Any = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : str = RoFormerConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = True
A : List[Any] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = FlaxRoFormerModelTester(self )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=A )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(A )
@require_flax
class _a ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
SCREAMING_SNAKE_CASE : str = jnp.array([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(A )[0]
SCREAMING_SNAKE_CASE : Dict = 50_000
SCREAMING_SNAKE_CASE : Any = (1, 6, vocab_size)
self.assertEqual(output.shape, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3], A, atol=1E-4 ) )
| 246
|
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
'''simple docstring'''
def __init__( self, A, A=2, A=3, A=4, A=2, A=7, A=True, A=True, A=True, A=True, A=99, A=36, A=3, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=6, A=6, A=3, A=4, A=None, A=1_000, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = text_seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : str = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : Union[str, Any] = text_seq_length
SCREAMING_SNAKE_CASE : List[str] = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : int = self.text_seq_length + self.image_seq_length
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Tuple = t
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
SCREAMING_SNAKE_CASE : int = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[int] = model(A, pixel_values=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A )
SCREAMING_SNAKE_CASE : List[str] = model(A, bbox=A, pixel_values=A, token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[int] = model(A, bbox=A, pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[Any] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : str = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : Any = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = False
A : List[str] = False
A : Union[str, Any] = False
A : Optional[Any] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A : List[Any] = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self, A, A, A, A, A ):
'''simple docstring'''
return True
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self, config_class=A, hidden_size=37 )
def UpperCamelCase_ ( self, A, A, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(A )
if model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Optional[int] = {
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(A, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in [
*get_values(A ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in [
*get_values(A ),
]:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=A, )
return inputs_dict
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : List[str] = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).pixel_values.to(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
input_ids=input_ids.to(A ), bbox=bbox.to(A ), pixel_values=pixel_values.to(A ), )
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], A, atol=1E-4 ) )
| 246
| 1
|
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowercase_ = False
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self , A=32 ) -> Optional[Any]:
"""simple docstring"""
set_seed(0 )
_a = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
_a = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def a__ (self ) -> Dict:
"""simple docstring"""
_a = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_a = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase__ , )
_a = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_a = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
_a = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
_a = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
_a = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
_a = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_a = model(UpperCamelCase__ , timesteps[i] ).sample
_a = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_a = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
_a = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_a = model(UpperCamelCase__ , timesteps[i] ).sample
_a = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
| 211
|
'''simple docstring'''
class _lowercase :
def __init__( self: Optional[Any] ):
lowerCamelCase__ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
lowerCamelCase__ : List[str] = False
def lowerCamelCase_ ( self: str , UpperCamelCase__: list[str] ):
for word in words:
self.insert(UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
lowerCamelCase__ : List[Any] = self
for char in word:
if char not in curr.nodes:
lowerCamelCase__ : Tuple = TrieNode()
lowerCamelCase__ : List[Any] = curr.nodes[char]
lowerCamelCase__ : Any = True
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ):
lowerCamelCase__ : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
lowerCamelCase__ : Any = curr.nodes[char]
return curr.is_leaf
def lowerCamelCase_ ( self: str , UpperCamelCase__: str ):
def _delete(UpperCamelCase__: TrieNode , UpperCamelCase__: str , UpperCamelCase__: int ) -> bool:
if index == len(UpperCamelCase__ ):
# If word does not exist
if not curr.is_leaf:
return False
lowerCamelCase__ : str = False
return len(curr.nodes ) == 0
lowerCamelCase__ : List[str] = word[index]
lowerCamelCase__ : Dict = curr.nodes.get(UpperCamelCase__ )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
lowerCamelCase__ : List[Any] = _delete(UpperCamelCase__ , UpperCamelCase__ , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , UpperCamelCase__ , 0 )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> None:
if node.is_leaf:
print(UpperCamelCase , end=""" """ )
for key, value in node.nodes.items():
print_words(UpperCamelCase , word + key )
def SCREAMING_SNAKE_CASE_ () -> bool:
lowerCamelCase__ : str = """banana bananas bandana band apple all beast""".split()
lowerCamelCase__ : Union[str, Any] = TrieNode()
root.insert_many(UpperCamelCase )
# print_words(root, "")
assert all(root.find(UpperCamelCase ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> None:
print(str(UpperCamelCase ) , """works!""" if passes else """doesn't work :(""" )
def SCREAMING_SNAKE_CASE_ () -> None:
assert test_trie()
def SCREAMING_SNAKE_CASE_ () -> None:
print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
| 41
| 0
|
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : str = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def A_( A : Optional[Any] , A : Union[str, Any] , A : List[str] , A : List[Any]):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.''')
if tokenizer_name is None:
UpperCamelCase = TOKENIZER_CLASSES
else:
UpperCamelCase = {tokenizer_name: getattr(A , tokenizer_name + 'Fast')}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''')
for tokenizer_name in tokenizer_names:
UpperCamelCase = TOKENIZER_CLASSES[tokenizer_name]
UpperCamelCase = True
if checkpoint_name is None:
UpperCamelCase = list(tokenizer_class.max_model_input_sizes.keys())
else:
UpperCamelCase = [checkpoint_name]
logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''')
for checkpoint in checkpoint_names:
logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''')
# Load tokenizer
UpperCamelCase = tokenizer_class.from_pretrained(A , force_download=A)
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''')
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCamelCase , UpperCamelCase = checkpoint.split('/')
UpperCamelCase = os.path.join(A , A)
elif add_prefix:
UpperCamelCase = checkpoint
UpperCamelCase = dump_path
else:
UpperCamelCase = None
UpperCamelCase = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
UpperCamelCase = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
UpperCamelCase = file_path.split(A)[-1][0]
if next_char == "/":
UpperCamelCase = os.path.join(A , A)
UpperCamelCase = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
UpperCamelCase = tokenizer.save_pretrained(
A , legacy_format=A , filename_prefix=A)
logger.info(f'''=> File names {file_names}''')
for file_name in file_names:
if not file_name.endswith('tokenizer.json'):
os.remove(A)
logger.info(f'''=> removing {file_name}''')
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
lowerCAmelCase : Any = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 353
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase : Union[str, Any] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCAmelCase : List[str] = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
lowerCAmelCase : Dict = '▁'
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , )-> None:
'''simple docstring'''
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase = len(self.sp_model ) - 1
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(A_ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A_ )
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = ''
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(A_ )
UpperCamelCase = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self )-> int:
'''simple docstring'''
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , A_ , A_ = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 251
| 0
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : List[Any] = logging.get_logger(__name__)
# General docstring
_a : Optional[int] = 'RegNetConfig'
# Base docstring
_a : Union[str, Any] = 'facebook/regnet-y-040'
_a : Tuple = [1, 1_088, 7, 7]
# Image classification docstring
_a : Tuple = 'facebook/regnet-y-040'
_a : Union[str, Any] = 'tabby, tabby cat'
_a : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ = 3 , a__ = 1 , a__ = 1 , a__ = "relu" , ):
super().__init__()
_lowerCAmelCase : str = nn.Convad(
a__ , a__ , kernel_size=a__ , stride=a__ , padding=kernel_size // 2 , groups=a__ , bias=a__ , )
_lowerCAmelCase : str = nn.BatchNormad(a__ )
_lowerCAmelCase : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = self.convolution(a__ )
_lowerCAmelCase : Union[str, Any] = self.normalization(a__ )
_lowerCAmelCase : List[Any] = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Optional[int] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_lowerCAmelCase : List[Any] = config.num_channels
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
_lowerCAmelCase : List[Any] = self.embedder(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ = 2 ):
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Convad(a__ , a__ , kernel_size=1 , stride=a__ , bias=a__ )
_lowerCAmelCase : Optional[int] = nn.BatchNormad(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = self.convolution(a__ )
_lowerCAmelCase : Tuple = self.normalization(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ ):
super().__init__()
_lowerCAmelCase : Dict = nn.AdaptiveAvgPoolad((1, 1) )
_lowerCAmelCase : Tuple = nn.Sequential(
nn.Convad(a__ , a__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(a__ , a__ , kernel_size=1 ) , nn.Sigmoid() , )
def __A ( self , a__ ):
# b c h w -> b c 1 1
_lowerCAmelCase : Tuple = self.pooler(a__ )
_lowerCAmelCase : int = self.attention(a__ )
_lowerCAmelCase : Optional[Any] = hidden_state * attention
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 1 ):
super().__init__()
_lowerCAmelCase : Optional[Any] = in_channels != out_channels or stride != 1
_lowerCAmelCase : Optional[Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : Tuple = (
RegNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase : str = nn.Sequential(
RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , stride=a__ , groups=a__ , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=a__ ) , )
_lowerCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : Any = hidden_state
_lowerCAmelCase : Any = self.layer(a__ )
_lowerCAmelCase : str = self.shortcut(a__ )
hidden_state += residual
_lowerCAmelCase : int = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 1 ):
super().__init__()
_lowerCAmelCase : Dict = in_channels != out_channels or stride != 1
_lowerCAmelCase : List[Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : List[Any] = (
RegNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase : Tuple = nn.Sequential(
RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , stride=a__ , groups=a__ , activation=config.hidden_act ) , RegNetSELayer(a__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=a__ ) , )
_lowerCAmelCase : Dict = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = hidden_state
_lowerCAmelCase : Optional[int] = self.layer(a__ )
_lowerCAmelCase : List[Any] = self.shortcut(a__ )
hidden_state += residual
_lowerCAmelCase : List[str] = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 2 , a__ = 2 , ):
super().__init__()
_lowerCAmelCase : List[Any] = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
_lowerCAmelCase : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
a__ , a__ , a__ , stride=a__ , ) , *[layer(a__ , a__ , a__ ) for _ in range(depth - 1 )] , )
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = self.layers(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
a__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowerCAmelCase : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(a__ , config.depths[1:] ):
self.stages.append(RegNetStage(a__ , a__ , a__ , depth=a__ ) )
def __A ( self , a__ , a__ = False , a__ = True ):
_lowerCAmelCase : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase : Union[str, Any] = hidden_states + (hidden_state,)
_lowerCAmelCase : Union[str, Any] = stage_module(a__ )
if output_hidden_states:
_lowerCAmelCase : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = RegNetConfig
_UpperCamelCase : int = "regnet"
_UpperCamelCase : List[str] = "pixel_values"
_UpperCamelCase : Union[str, Any] = True
def __A ( self , a__ ):
if isinstance(a__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __A ( self , a__ , a__=False ):
if isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = value
_a : List[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_a : List[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[Any] = config
_lowerCAmelCase : Any = RegNetEmbeddings(a__ )
_lowerCAmelCase : List[str] = RegNetEncoder(a__ )
_lowerCAmelCase : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self , a__ , a__ = None , a__ = None ):
_lowerCAmelCase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : List[Any] = self.embedder(a__ )
_lowerCAmelCase : Optional[Any] = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ )
_lowerCAmelCase : str = encoder_outputs[0]
_lowerCAmelCase : Optional[Any] = self.pooler(a__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config.num_labels
_lowerCAmelCase : Optional[int] = RegNetModel(a__ )
# classification head
_lowerCAmelCase : Optional[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self , a__ = None , a__ = None , a__ = None , a__ = None , ):
_lowerCAmelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Union[str, Any] = self.regnet(a__ , output_hidden_states=a__ , return_dict=a__ )
_lowerCAmelCase : str = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : str = self.classifier(a__ )
_lowerCAmelCase : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase : str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase : Optional[int] = """single_label_classification"""
else:
_lowerCAmelCase : Optional[int] = """multi_label_classification"""
if self.config.problem_type == "regression":
_lowerCAmelCase : Optional[Any] = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase : int = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase : Dict = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase : Dict = BCEWithLogitsLoss()
_lowerCAmelCase : Tuple = loss_fct(a__ , a__ )
if not return_dict:
_lowerCAmelCase : Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
| 44
|
def lowerCAmelCase__ ( ) -> Any:
'''simple docstring'''
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Any:
'''simple docstring'''
A__ = 1
A__ = 2
while i * i <= n:
A__ = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(SCREAMING_SNAKE_CASE_ ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 68
| 0
|
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
A__ = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
A__ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCAmelCase_ , 1 ):
if n < _p:
# then we have our last prime to check
A__ = primes[:idx]
break
A__ , A__ = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
A__ = False
for r in range(UpperCAmelCase_ ):
A__ = pow(UpperCAmelCase_ , d * 2**r , UpperCAmelCase_ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
A__ = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _snake_case ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 69
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE_ : List[str] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
SCREAMING_SNAKE_CASE_ : List[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def UpperCamelCase ( self: int , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
A__ = TER(
normalized=UpperCamelCase , no_punct=UpperCamelCase , asian_support=UpperCamelCase , case_sensitive=UpperCamelCase , )
A__ = sb_ter.corpus_score(UpperCamelCase , UpperCamelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 69
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : Optional[torch.FloatTensor] = None
class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Tuple = 2
@register_to_config
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = sigma_max
# setable values
A: int = None
A: np.IntTensor = None
A: torch.FloatTensor = None # sigma(t_i)
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = num_inference_steps
A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
A: str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
A: List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
A: Optional[Any] = sigma + gamma * sigma
A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: Union[str, Any] = sample_hat + sigma_hat * model_output
A: str = (sample_hat - pred_original_sample) / sigma_hat
A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: int = sample_prev + sigma_prev * model_output
A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev
A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
| 319
|
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _snake_case ( self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
UpperCamelCase_ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
UpperCamelCase_ : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A: Tuple = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A: str = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]:
with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f:
A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())]
assert len(__lowercase ) == len(__lowercase )
A: Optional[int] = {c: dataset[c] for c in dataset.column_names}
A: Union[str, Any] = refs
return Dataset.from_dict(__lowercase )
def SCREAMING_SNAKE_CASE( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A: Any = {}
if data_args.train_file is not None:
A: int = data_args.train_file
if data_args.validation_file is not None:
A: Optional[int] = data_args.validation_file
A: List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
A: int = '''text'''
A: Any = load_dataset(__lowercase , data_files=__lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Dict = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase )
elif model_args.model_name_or_path:
A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
A: List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A: int = datasets['''train'''].column_names
else:
A: str = datasets['''validation'''].column_names
A: Tuple = '''text''' if '''text''' in column_names else column_names[0]
A: List[str] = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__lowercase ):
# Remove empty lines
A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length )
A: str = datasets.map(
__lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A: Dict = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A: List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A: Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A: Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A: str = model_args.model_name_or_path
else:
A: List[str] = None
A: str = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
A: Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A: Optional[Any] = trainer.evaluate()
A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
A: Dict = perplexity
A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 319
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
UpperCamelCase = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
UpperCamelCase = {
'''ctrl''': 256,
}
UpperCamelCase = {
'''Pregnancy''': 168629,
'''Christianity''': 7675,
'''Explain''': 106423,
'''Fitness''': 63440,
'''Saving''': 63163,
'''Ask''': 27171,
'''Ass''': 95985,
'''Joke''': 163509,
'''Questions''': 45622,
'''Thoughts''': 49605,
'''Retail''': 52342,
'''Feminism''': 164338,
'''Writing''': 11992,
'''Atheism''': 192263,
'''Netflix''': 48616,
'''Computing''': 39639,
'''Opinion''': 43213,
'''Alone''': 44967,
'''Funny''': 58917,
'''Gaming''': 40358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 77138,
'''Diet''': 36206,
'''Legal''': 11859,
'''Norman''': 4939,
'''Tip''': 72689,
'''Weight''': 52343,
'''Movies''': 46273,
'''Running''': 23425,
'''Science''': 2090,
'''Horror''': 37793,
'''Confession''': 60572,
'''Finance''': 12250,
'''Politics''': 16360,
'''Scary''': 191985,
'''Support''': 12654,
'''Technologies''': 32516,
'''Teenage''': 66160,
'''Event''': 32769,
'''Learned''': 67460,
'''Notion''': 182770,
'''Wikipedia''': 37583,
'''Books''': 6665,
'''Extract''': 76050,
'''Confessions''': 102701,
'''Conspiracy''': 75932,
'''Links''': 63674,
'''Narcissus''': 150425,
'''Relationship''': 54766,
'''Relationships''': 134796,
'''Reviews''': 41671,
'''News''': 4256,
'''Translation''': 26820,
'''multilingual''': 128406,
}
def SCREAMING_SNAKE_CASE( __lowercase ) -> Union[str, Any]:
A: Dict = set()
A: Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A: Optional[int] = char
A: Union[str, Any] = set(__lowercase )
return pairs
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : str = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[Any] = CONTROL_CODES
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any="<unk>" , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict:
'''simple docstring'''
super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
A: int = json.load(SCREAMING_SNAKE_CASE_ )
A: str = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
A: List[Any] = merges_handle.read().split('''\n''' )[1:-1]
A: Any = [tuple(merge.split() ) for merge in merges]
A: Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: Any = {}
@property
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any ) -> Tuple:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ )
A: str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
A: int = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
A: Union[str, Any] = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
A: Any = bigram
A: Optional[int] = []
A: Dict = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
A: Optional[Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A: List[Any] = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A: Optional[int] = tuple(SCREAMING_SNAKE_CASE_ )
A: List[Any] = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
A: Tuple = get_pairs(SCREAMING_SNAKE_CASE_ )
A: Optional[int] = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
A: Any = word[:-4]
A: Tuple = word
return word
def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A: int = []
A: Any = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
return split_tokens
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A: Tuple = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
A: Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
A: Any = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
A: Optional[int] = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
A: Optional[Any] = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 350
|
'''simple docstring'''
import requests
UpperCamelCase = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
# fetching a list of articles in json format
A: Tuple = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 334
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _UpperCAmelCase ( __a , unittest.TestCase):
__a : Any = BertTokenizer
__a : Any = BertTokenizerFast
__a : Optional[Any] = True
__a : Tuple = True
__a : Union[str, Any] = filter_non_english
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
_UpperCAmelCase : Optional[int] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __snake_case ( self , _A ) -> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = """UNwant\u00E9d,running"""
_UpperCAmelCase : Tuple = """unwanted, running"""
return input_text, output_text
def __snake_case ( self ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def __snake_case ( self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Any = self.get_rust_tokenizer()
_UpperCAmelCase : Tuple = """UNwant\u00E9d,running"""
_UpperCAmelCase : List[Any] = tokenizer.tokenize(_A )
_UpperCAmelCase : str = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCAmelCase : Tuple = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCAmelCase : List[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
_UpperCAmelCase : List[Any] = tokenizer.encode(_A )
_UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
_UpperCAmelCase : Optional[int] = self.get_tokenizer(do_lower_case=_A )
_UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer(do_lower_case=_A )
_UpperCAmelCase : str = """UNwant\u00E9d,running"""
_UpperCAmelCase : Tuple = tokenizer.tokenize(_A )
_UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCAmelCase : List[Any] = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCAmelCase : List[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCAmelCase : Tuple = self.get_rust_tokenizer()
_UpperCAmelCase : Optional[int] = tokenizer.encode(_A )
_UpperCAmelCase : List[str] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __snake_case ( self ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=_A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = BasicTokenizer()
_UpperCAmelCase : str = """a\n'll !!to?'d of, can't."""
_UpperCAmelCase : str = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(_A ) , _A )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_UpperCAmelCase : Optional[Any] = {}
for i, token in enumerate(_A ):
_UpperCAmelCase : Optional[Any] = i
_UpperCAmelCase : int = WordpieceTokenizer(vocab=_A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __snake_case ( self ) -> int:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __snake_case ( self ) -> int:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=_A )
_UpperCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A )
_UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def __snake_case ( self ) -> Dict:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_UpperCAmelCase : str = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCAmelCase : Union[str, Any] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
_UpperCAmelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(_A , """do_lower_case""" ) else False
_UpperCAmelCase : Union[str, Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = ["""的""", """人""", """有"""]
_UpperCAmelCase : Optional[int] = """""".join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : str = True
_UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(_A , **_A )
_UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_UpperCAmelCase : Any = tokenizer_p.encode(_A , add_special_tokens=_A )
_UpperCAmelCase : Dict = tokenizer_r.encode(_A , add_special_tokens=_A )
_UpperCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(_A )
_UpperCAmelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
_UpperCAmelCase : int = False
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(_A , **_A )
_UpperCAmelCase : Any = tokenizer_r.encode(_A , add_special_tokens=_A )
_UpperCAmelCase : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A )
_UpperCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(_A )
_UpperCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCAmelCase : Union[str, Any] = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 246
|
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : int ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : List[str] = 1
while repunit:
_UpperCAmelCase : Tuple = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def UpperCamelCase ( _lowerCAmelCase : int = 1000000 ) -> int:
_UpperCAmelCase : Any = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCAmelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 246
| 1
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
try:
SCREAMING_SNAKE_CASE = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE = strtobool(_UpperCAmelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''')
return _value
a_ : str = parse_flag_from_env('RUN_SLOW', default=False)
a_ : Optional[int] = parse_flag_from_env('RUN_REMOTE', default=False)
a_ : Optional[int] = parse_flag_from_env('RUN_LOCAL', default=True)
a_ : Union[str, Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a_ : Union[str, Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a_ : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a_ : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a_ : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a_ : Dict = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a_ : Optional[Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a_ : List[str] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def lowerCamelCase__ (_UpperCAmelCase):
try:
import faiss # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires faiss')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import regex # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires regex')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import elasticsearch # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires elasticsearch')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import sqlalchemy # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires sqlalchemy')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.TORCH_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires PyTorch')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.TF_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires TensorFlow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.JAX_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires JAX')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.PIL_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires Pillow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
def _require_spacy_model(_UpperCAmelCase):
try:
import spacy # noqa F401
spacy.load(_UpperCAmelCase)
except ImportError:
return unittest.skip('test requires spacy')(_UpperCAmelCase)
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(_UpperCAmelCase))(_UpperCAmelCase)
else:
return test_case
return _require_spacy_model
def lowerCamelCase__ (_UpperCAmelCase):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_slow_tests or _run_slow_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is slow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_local_tests or _run_local_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is local')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_packaged_tests or _run_packaged_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is packaged')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_remote_tests or _run_remote_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test requires remote')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (*_UpperCAmelCase):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(_UpperCAmelCase) and name.startswith('test'):
for decorator in decorators:
SCREAMING_SNAKE_CASE = decorator(_UpperCAmelCase)
setattr(cls , _UpperCAmelCase , _UpperCAmelCase)
return cls
return decorate
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
_lowercase : Union[str, Any] = 0
_lowercase : List[Any] = 1
_lowercase : Optional[Any] = 2
@contextmanager
def lowerCamelCase__ (_UpperCAmelCase=OfflineSimulationMode.CONNECTION_FAILS , _UpperCAmelCase=1e-16):
SCREAMING_SNAKE_CASE = requests.Session().request
def timeout_request(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
# Change the url to an invalid url so that the connection hangs
SCREAMING_SNAKE_CASE = 'https://10.255.255.1'
if kwargs.get('timeout') is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
SCREAMING_SNAKE_CASE = timeout
try:
return online_request(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
SCREAMING_SNAKE_CASE = url
SCREAMING_SNAKE_CASE = e.args[0]
SCREAMING_SNAKE_CASE = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]'''),)
SCREAMING_SNAKE_CASE = (max_retry_error,)
raise
def raise_connection_error(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
raise requests.ConnectionError('Offline mode is enabled.' , request=_UpperCAmelCase)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , _UpperCAmelCase):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , _UpperCAmelCase):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCAmelCase):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def lowerCamelCase__ (*_UpperCAmelCase , **_UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(Path().resolve())
with tempfile.TemporaryDirectory(*_UpperCAmelCase , **_UpperCAmelCase) as tmp_dir:
try:
os.chdir(_UpperCAmelCase)
yield
finally:
os.chdir(_UpperCAmelCase)
@contextmanager
def lowerCamelCase__ ():
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCamelCase__ ():
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return deepcopy(_UpperCAmelCase).integers(0 , 100 , 10).tolist() == deepcopy(_UpperCAmelCase).integers(0 , 100 , 10).tolist()
def lowerCamelCase__ (_UpperCAmelCase):
import decorator
from requests.exceptions import HTTPError
def _wrapper(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase):
try:
return func(*_UpperCAmelCase , **_UpperCAmelCase)
except HTTPError as err:
if str(_UpperCAmelCase).startswith('500') or str(_UpperCAmelCase).startswith('502'):
pytest.xfail(str(_UpperCAmelCase))
raise err
return decorator.decorator(_wrapper , _UpperCAmelCase)
class _snake_case :
def __init__( self , a , a , a) -> str:
SCREAMING_SNAKE_CASE = returncode
SCREAMING_SNAKE_CASE = stdout
SCREAMING_SNAKE_CASE = stderr
async def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
while True:
SCREAMING_SNAKE_CASE = await stream.readline()
if line:
callback(_UpperCAmelCase)
else:
break
async def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False):
if echo:
print('\nRunning: ' , ' '.join(_UpperCAmelCase))
SCREAMING_SNAKE_CASE = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
def tee(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=""):
SCREAMING_SNAKE_CASE = line.decode('utf-8').rstrip()
sink.append(_UpperCAmelCase)
if not quiet:
print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _UpperCAmelCase: tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:')),
_read_stream(p.stderr , lambda _UpperCAmelCase: tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:')),
] , timeout=_UpperCAmelCase , )
return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=180 , _UpperCAmelCase=False , _UpperCAmelCase=True):
SCREAMING_SNAKE_CASE = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE = loop.run_until_complete(
_stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase))
SCREAMING_SNAKE_CASE = ' '.join(_UpperCAmelCase)
if result.returncode > 0:
SCREAMING_SNAKE_CASE = '\n'.join(result.stderr)
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''')
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'''\'{cmd_str}\' produced no output.''')
return result
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0')
SCREAMING_SNAKE_CASE = re.sub(R'^gw' , '' , _UpperCAmelCase , 0 , re.M)
return int(_UpperCAmelCase)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = 2_9500
SCREAMING_SNAKE_CASE = pytest_xdist_worker_id()
return port + uniq_delta
| 327
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a , cache_dir=a)
SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(a , os.listdir(a)[0] , 'snapshots'))]
SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1E-3
assert np.abs(np.abs(a , dtype=np.floataa).sum() - 4_99_47.8_75) < 5E-1
SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(a) == num_samples
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=a , steps_offset=1 , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=a , safety_checker=a , )
SCREAMING_SNAKE_CASE = scheduler.create_state()
SCREAMING_SNAKE_CASE = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0) , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , use_memory_efficient_attention=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
| 327
| 1
|
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] ):
UpperCamelCase :Union[str, Any] = 1
UpperCamelCase :Any = 2
while i * i <= n:
UpperCamelCase :str = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _A ( ):
UpperCamelCase :List[str] = 1
UpperCamelCase :List[Any] = 1
while True:
i += 1
t_num += i
if count_divisors(__UpperCamelCase ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 259
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MaskFormerFeatureExtractor"]
UpperCamelCase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
UpperCamelCase_ = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 251
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : Dict = 16
_SCREAMING_SNAKE_CASE : Optional[int] = 32
def UpperCamelCase_( snake_case : Accelerator , snake_case : int = 1_6 ):
'''simple docstring'''
snake_case_ = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ = load_dataset("glue" , "mrpc" )
def tokenize_function(snake_case : List[str] ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case , max_length=snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ = datasets.map(
snake_case , batched=snake_case , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(snake_case : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ = 1_6
elif accelerator.mixed_precision != "no":
snake_case_ = 8
else:
snake_case_ = None
return tokenizer.pad(
snake_case , padding="longest" , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors="pt" , )
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets["train"] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
snake_case_ = DataLoader(
tokenized_datasets["validation"] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : List[str] = mocked_dataloaders # noqa: F811
def UpperCamelCase_( snake_case : str , snake_case : Optional[int] ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , snake_case ) == "1":
snake_case_ = 2
# New Code #
snake_case_ = int(args.gradient_accumulation_steps )
# Initialize accelerator
snake_case_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config["lr"]
snake_case_ = int(config["num_epochs"] )
snake_case_ = int(config["seed"] )
snake_case_ = int(config["batch_size"] )
snake_case_ = evaluate.load("glue" , "mrpc" )
set_seed(snake_case )
snake_case_ , snake_case_ = get_dataloaders(snake_case , snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ = AdamW(params=model.parameters() , lr=snake_case )
# Instantiate scheduler
snake_case_ = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=1_0_0 , num_training_steps=(len(snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Now we train the model
for epoch in range(snake_case ):
model.train()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case ):
snake_case_ = model(**snake_case )
snake_case_ = output.loss
accelerator.backward(snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**snake_case )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=snake_case , references=snake_case , )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , snake_case )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=snake_case , default=snake_case , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
snake_case_ = parser.parse_args()
snake_case_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
| 369
|
'''simple docstring'''
def UpperCamelCase_( snake_case : int , snake_case : int ):
'''simple docstring'''
while b:
snake_case_ , snake_case_ = b, a % b
return a
def UpperCamelCase_( snake_case : int , snake_case : int ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(snake_case , a % b )
def UpperCamelCase_( ):
'''simple docstring'''
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 92
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__) -> str:
snake_case_ = data
def __iter__( self) -> List[Any]:
for element in self.data:
yield element
def UpperCAmelCase ( UpperCAmelCase=True ) -> int:
snake_case_ = Accelerator(even_batches=UpperCAmelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> List[str]:
if iterable:
snake_case_ = DummyIterableDataset(torch.as_tensor(range(UpperCAmelCase ) ) )
else:
snake_case_ = TensorDataset(torch.as_tensor(range(UpperCAmelCase ) ) )
snake_case_ = DataLoader(UpperCAmelCase , batch_size=UpperCAmelCase )
snake_case_ = accelerator.prepare(UpperCAmelCase )
return dl
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]:
snake_case_ = create_dataloader(accelerator=UpperCAmelCase , dataset_size=UpperCAmelCase , batch_size=UpperCAmelCase )
snake_case_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def UpperCAmelCase ( ) -> Tuple:
snake_case_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCAmelCase ( ) -> str:
snake_case_ = create_accelerator(even_batches=UpperCAmelCase )
verify_dataloader_batch_sizes(
UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCAmelCase ( ) -> int:
snake_case_ = create_accelerator(even_batches=UpperCAmelCase )
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(UpperCAmelCase )
snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 )
snake_case_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(UpperCAmelCase ):
snake_case_ = ddp_model(batch[0].float() )
snake_case_ = output.sum()
loss.backward()
batch_idxs.append(UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
with warnings.catch_warnings(record=UpperCAmelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , UpperCAmelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCAmelCase ( ) -> Optional[int]:
snake_case_ = True
snake_case_ = False
snake_case_ = create_accelerator(even_batches=UpperCAmelCase )
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(UpperCAmelCase )
snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 )
snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase ):
snake_case_ = train_dl.batch_sampler.even_batches
snake_case_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase ( ) -> int:
snake_case_ = True
snake_case_ = False
snake_case_ = create_accelerator(even_batches=UpperCAmelCase )
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(UpperCAmelCase )
create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase )
snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase ):
snake_case_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase ( ) -> Any:
snake_case_ = create_accelerator()
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(UpperCAmelCase )
create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase )
with warnings.catch_warnings(record=UpperCAmelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase ):
pass
assert issubclass(w[-1].category , UpperCAmelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCAmelCase ( ) -> Optional[int]:
snake_case_ = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
snake_case_ = accelerator.state.distributed_type
snake_case_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(UpperCAmelCase )
snake_case_ = original_state
if __name__ == "__main__":
main()
| 69
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''▁'''
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
__UpperCamelCase = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case_ = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase__))
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model)
snake_case_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__)
}
snake_case_ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case_ = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
snake_case_ = src_lang if src_lang is not None else 'eng_Latn'
snake_case_ = self.lang_code_to_id[self._src_lang]
snake_case_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> Union[str, Any]:
snake_case_ = self.__dict__.copy()
snake_case_ = None
snake_case_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, lowerCAmelCase__) -> Tuple:
snake_case_ = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def a_ ( self) -> str:
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a_ ( self) -> str:
return self._src_lang
@src_lang.setter
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__)
snake_case_ = [1] * len(self.prefix_tokens)
snake_case_ = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__)) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__)) + ([0] * len(lowerCAmelCase__)) + suffix_ones
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
snake_case_ = src_lang
snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__)
snake_case_ = tgt_lang_id
return inputs
def a_ ( self) -> List[Any]:
snake_case_ = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def a_ ( self, lowerCAmelCase__) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(lowerCAmelCase__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a_ ( self, lowerCAmelCase__) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def a_ ( self, lowerCAmelCase__) -> List[str]:
snake_case_ = ''.join(lowerCAmelCase__).replace(lowerCAmelCase__, ' ').strip()
return out_string
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__, 'wb') as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding:
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang)
def a_ ( self) -> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
| 69
| 1
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A_ : Optional[int] =False
A_ : int =logging.get_logger(__name__)
A_ : List[str] ="""ybelkada/fonts"""
def SCREAMING_SNAKE_CASE_ ( )-> Union[str, Any]:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def SCREAMING_SNAKE_CASE_ ( snake_case : Any , snake_case : Optional[int] , snake_case : List[Any] )-> Dict:
requires_backends(snake_case , ['torch'] )
_check_torch_version()
_lowerCamelCase = image_tensor.unsqueeze(0 )
_lowerCamelCase = torch.nn.functional.unfold(snake_case , (patch_height, patch_width) , stride=(patch_height, patch_width) )
_lowerCamelCase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , snake_case , snake_case , -1 )
_lowerCamelCase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : int = 36 , snake_case : str = "black" , snake_case : str = "white" , snake_case : int = 5 , snake_case : int = 5 , snake_case : int = 5 , snake_case : int = 5 , snake_case : Optional[bytes] = None , snake_case : Optional[str] = None , )-> Image.Image:
requires_backends(snake_case , 'vision' )
# Add new lines so that each line is no more than 80 characters.
_lowerCamelCase = textwrap.TextWrapper(width=80 )
_lowerCamelCase = wrapper.wrap(text=snake_case )
_lowerCamelCase = '\n'.join(snake_case )
if font_bytes is not None and font_path is None:
_lowerCamelCase = io.BytesIO(snake_case )
elif font_path is not None:
_lowerCamelCase = font_path
else:
_lowerCamelCase = hf_hub_download(snake_case , 'Arial.TTF' )
_lowerCamelCase = ImageFont.truetype(snake_case , encoding='UTF-8' , size=snake_case )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
_lowerCamelCase = ImageDraw.Draw(Image.new('RGB' , (1, 1) , snake_case ) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = temp_draw.textbbox((0, 0) , snake_case , snake_case )
# Create the actual image with a bit of padding around the text.
_lowerCamelCase = text_width + left_padding + right_padding
_lowerCamelCase = text_height + top_padding + bottom_padding
_lowerCamelCase = Image.new('RGB' , (image_width, image_height) , snake_case )
_lowerCamelCase = ImageDraw.Draw(snake_case )
draw.text(xy=(left_padding, top_padding) , text=snake_case , fill=snake_case , font=snake_case )
return image
def SCREAMING_SNAKE_CASE_ ( snake_case : np.ndarray , snake_case : str , **snake_case : List[Any] )-> Union[str, Any]:
requires_backends(snake_case , 'vision' )
# Convert to PIL image if necessary
_lowerCamelCase = to_pil_image(snake_case )
_lowerCamelCase = render_text(snake_case , **snake_case )
_lowerCamelCase = max(header_image.width , image.width )
_lowerCamelCase = int(image.height * (new_width / image.width) )
_lowerCamelCase = int(header_image.height * (new_width / header_image.width) )
_lowerCamelCase = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
_lowerCamelCase = to_numpy_array(snake_case )
if infer_channel_dimension_format(snake_case ) == ChannelDimension.LAST:
_lowerCamelCase = to_channel_dimension_format(snake_case , ChannelDimension.LAST )
return new_image
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = ["flattened_patches"]
def __init__( self , a__ = True , a__ = True , a__ = None , a__ = 20_48 , a__ = False , **a__ , ):
super().__init__(**a__ )
_lowerCamelCase = patch_size if patch_size is not None else {'height': 16, 'width': 16}
_lowerCamelCase = do_normalize
_lowerCamelCase = do_convert_rgb
_lowerCamelCase = max_patches
_lowerCamelCase = is_vqa
def snake_case_ ( self , a__ , a__ , a__ , **a__ ):
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
_lowerCamelCase = to_channel_dimension_format(a__ , ChannelDimension.FIRST )
_lowerCamelCase = torch.from_numpy(a__ )
_lowerCamelCase , _lowerCamelCase = patch_size['height'], patch_size['width']
_lowerCamelCase , _lowerCamelCase = get_image_size(a__ )
# maximize scale s.t.
_lowerCamelCase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_lowerCamelCase = max(min(math.floor(scale * image_height / patch_height ) , a__ ) , 1 )
_lowerCamelCase = max(min(math.floor(scale * image_width / patch_width ) , a__ ) , 1 )
_lowerCamelCase = max(num_feasible_rows * patch_height , 1 )
_lowerCamelCase = max(num_feasible_cols * patch_width , 1 )
_lowerCamelCase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=a__ , antialias=a__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_lowerCamelCase = torch_extract_patches(a__ , a__ , a__ )
_lowerCamelCase = patches.shape
_lowerCamelCase = patches_shape[1]
_lowerCamelCase = patches_shape[2]
_lowerCamelCase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_lowerCamelCase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_lowerCamelCase = torch.arange(a__ ).reshape([rows, 1] ).repeat(1 , a__ ).reshape([rows * columns, 1] )
_lowerCamelCase = torch.arange(a__ ).reshape([1, columns] ).repeat(a__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
_lowerCamelCase = row_ids.to(torch.floataa )
_lowerCamelCase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_lowerCamelCase = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_lowerCamelCase = torch.nn.functional.pad(a__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
_lowerCamelCase = to_numpy_array(a__ )
return result
def snake_case_ ( self , a__ , a__ = None , **a__ ):
if image.dtype == np.uinta:
_lowerCamelCase = image.astype(np.floataa )
# take mean across the whole `image`
_lowerCamelCase = np.mean(a__ )
_lowerCamelCase = np.std(a__ )
_lowerCamelCase = max(a__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(a__ , mean=a__ , std=a__ , **a__ )
def snake_case_ ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase = patch_size if patch_size is not None else self.patch_size
_lowerCamelCase = max_patches if max_patches is not None else self.max_patches
_lowerCamelCase = self.is_vqa
if kwargs.get('data_format' , a__ ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
_lowerCamelCase = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase = [convert_to_rgb(a__ ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(a__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
_lowerCamelCase = kwargs.pop('font_bytes' , a__ )
_lowerCamelCase = kwargs.pop('font_path' , a__ )
if isinstance(a__ , a__ ):
_lowerCamelCase = [header_text] * len(a__ )
_lowerCamelCase = [
render_header(a__ , header_text[i] , font_bytes=a__ , font_path=a__ )
for i, image in enumerate(a__ )
]
if do_normalize:
_lowerCamelCase = [self.normalize(image=a__ ) for image in images]
# convert to torch tensor and permute
_lowerCamelCase = [
self.extract_flattened_patches(image=a__ , max_patches=a__ , patch_size=a__ )
for image in images
]
# create attention mask in numpy
_lowerCamelCase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
_lowerCamelCase = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=a__ )
return encoded_outputs
| 80
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : "DiagonalGaussianDistribution"
class __a ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Any = True
@register_to_config
def __init__( self , a__ = 3 , a__ = 3 , a__ = ("DownEncoderBlock2D",) , a__ = ("UpDecoderBlock2D",) , a__ = (64,) , a__ = 1 , a__ = "silu" , a__ = 4 , a__ = 32 , a__ = 32 , a__ = 0.18215 , ):
super().__init__()
# pass init params to Encoder
_lowerCamelCase = Encoder(
in_channels=a__ , out_channels=a__ , down_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , act_fn=a__ , norm_num_groups=a__ , double_z=a__ , )
# pass init params to Decoder
_lowerCamelCase = Decoder(
in_channels=a__ , out_channels=a__ , up_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , norm_num_groups=a__ , act_fn=a__ , )
_lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_lowerCamelCase = nn.Convad(a__ , a__ , 1 )
_lowerCamelCase = False
_lowerCamelCase = False
# only relevant if vae tiling is enabled
_lowerCamelCase = self.config.sample_size
_lowerCamelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_lowerCamelCase = 0.25
def snake_case_ ( self , a__ , a__=False ):
if isinstance(a__ , (Encoder, Decoder) ):
_lowerCamelCase = value
def snake_case_ ( self , a__ = True ):
_lowerCamelCase = use_tiling
def snake_case_ ( self ):
self.enable_tiling(a__ )
def snake_case_ ( self ):
_lowerCamelCase = True
def snake_case_ ( self ):
_lowerCamelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case_ ( self ):
_lowerCamelCase = {}
def fn_recursive_add_processors(a__ , a__ , a__ ):
if hasattr(a__ , 'set_processor' ):
_lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , a__ , a__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a__ , a__ , a__ )
return processors
def snake_case_ ( self , a__ ):
_lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(a__ , a__ ) and len(a__ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(a__ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(a__ , a__ , a__ ):
if hasattr(a__ , 'set_processor' ):
if not isinstance(a__ , a__ ):
module.set_processor(a__ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , a__ , a__ )
for name, module in self.named_children():
fn_recursive_attn_processor(a__ , a__ , a__ )
def snake_case_ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def snake_case_ ( self , a__ , a__ = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(a__ , return_dict=a__ )
if self.use_slicing and x.shape[0] > 1:
_lowerCamelCase = [self.encoder(a__ ) for x_slice in x.split(1 )]
_lowerCamelCase = torch.cat(a__ )
else:
_lowerCamelCase = self.encoder(a__ )
_lowerCamelCase = self.quant_conv(a__ )
_lowerCamelCase = DiagonalGaussianDistribution(a__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a__ )
def snake_case_ ( self , a__ , a__ = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(a__ , return_dict=a__ )
_lowerCamelCase = self.post_quant_conv(a__ )
_lowerCamelCase = self.decoder(a__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
@apply_forward_hook
def snake_case_ ( self , a__ , a__ = True ):
if self.use_slicing and z.shape[0] > 1:
_lowerCamelCase = [self._decode(a__ ).sample for z_slice in z.split(1 )]
_lowerCamelCase = torch.cat(a__ )
else:
_lowerCamelCase = self._decode(a__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=a__ )
def snake_case_ ( self , a__ , a__ , a__ ):
_lowerCamelCase = min(a.shape[2] , b.shape[2] , a__ )
for y in range(a__ ):
_lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def snake_case_ ( self , a__ , a__ , a__ ):
_lowerCamelCase = min(a.shape[3] , b.shape[3] , a__ )
for x in range(a__ ):
_lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def snake_case_ ( self , a__ , a__ = True ):
_lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
_lowerCamelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_lowerCamelCase = []
for i in range(0 , x.shape[2] , a__ ):
_lowerCamelCase = []
for j in range(0 , x.shape[3] , a__ ):
_lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_lowerCamelCase = self.encoder(a__ )
_lowerCamelCase = self.quant_conv(a__ )
row.append(a__ )
rows.append(a__ )
_lowerCamelCase = []
for i, row in enumerate(a__ ):
_lowerCamelCase = []
for j, tile in enumerate(a__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowerCamelCase = self.blend_v(rows[i - 1][j] , a__ , a__ )
if j > 0:
_lowerCamelCase = self.blend_h(row[j - 1] , a__ , a__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a__ , dim=3 ) )
_lowerCamelCase = torch.cat(a__ , dim=2 )
_lowerCamelCase = DiagonalGaussianDistribution(a__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a__ )
def snake_case_ ( self , a__ , a__ = True ):
_lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
_lowerCamelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_lowerCamelCase = []
for i in range(0 , z.shape[2] , a__ ):
_lowerCamelCase = []
for j in range(0 , z.shape[3] , a__ ):
_lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_lowerCamelCase = self.post_quant_conv(a__ )
_lowerCamelCase = self.decoder(a__ )
row.append(a__ )
rows.append(a__ )
_lowerCamelCase = []
for i, row in enumerate(a__ ):
_lowerCamelCase = []
for j, tile in enumerate(a__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowerCamelCase = self.blend_v(rows[i - 1][j] , a__ , a__ )
if j > 0:
_lowerCamelCase = self.blend_h(row[j - 1] , a__ , a__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a__ , dim=3 ) )
_lowerCamelCase = torch.cat(a__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
def snake_case_ ( self , a__ , a__ = False , a__ = True , a__ = None , ):
_lowerCamelCase = sample
_lowerCamelCase = self.encode(a__ ).latent_dist
if sample_posterior:
_lowerCamelCase = posterior.sample(generator=a__ )
else:
_lowerCamelCase = posterior.mode()
_lowerCamelCase = self.decode(a__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
| 80
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.