| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 86–54.5k) | int64 (0–371) | string (length 87–49.2k) | int64 (0–349) | int64 (0–1) |
"""simple docstring"""
def lowercase ( _snake_case : int , _snake_case : int ) ->str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__snake_case : Tuple = str(bin(_snake_case ) )[2:] # remove the leading "0b"
__snake_case : List[Any] = str(bin(_snake_case ) )[2:]
__snake_case : Any = max(len(_snake_case ) , len(_snake_case ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_snake_case ) , b_binary.zfill(_snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
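    # Illustrative check (example values chosen here, not from the original file):
    # 25 | 32 == 57 == 0b111001
    print(binary_or(25, 32))  # --> 0b111001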
from __future__ import annotations

import numpy as np


def relu(vector):
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = PriorTransformer
a__ = """hidden_states"""
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = 4
a__: Any = 8
a__: Optional[Any] = 7
a__: Tuple = floats_tensor((batch_size, embedding_dim)).to(lowercase)
a__: Optional[int] = floats_tensor((batch_size, embedding_dim)).to(lowercase)
a__: List[str] = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(lowercase)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self , lowercase=0) -> str:
'''simple docstring'''
torch.manual_seed(lowercase)
a__: Optional[Any] = 4
a__: Optional[Any] = 8
a__: Union[str, Any] = 7
a__: Optional[Any] = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: List[str] = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: Tuple = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return (4, 8)
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return (4, 8)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
a__: Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: Union[str, Any] = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertEqual(len(loading_info['missing_keys']) , 0)
model.to(lowercase)
a__: Any = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__ , a__: Tuple = self.prepare_init_args_and_inputs_for_common()
a__: Any = self.model_class(**lowercase)
a__: str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: Tuple = [*signature.parameters.keys()]
a__: List[Any] = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: str = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
a__: str = model.to(lowercase)
if hasattr(lowercase , 'set_default_attn_processor'):
model.set_default_attn_processor()
a__: Dict = self.get_dummy_seed_input()
with torch.no_grad():
a__: str = model(**lowercase)[0]
a__: str = output[0, :5].flatten().cpu()
print(lowercase)
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a__: Any = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2))
@slow
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self , lowercase=1 , lowercase=7_68 , lowercase=77 , lowercase=0) -> int:
'''simple docstring'''
torch.manual_seed(lowercase)
a__: Union[str, Any] = batch_size
a__: List[str] = embedding_dim
a__: str = num_embeddings
a__: Tuple = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: List[str] = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: str = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
])
def lowerCamelCase_ ( self , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Tuple = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior')
model.to(lowercase)
a__: Optional[Any] = self.get_dummy_seed_input(seed=lowercase)
with torch.no_grad():
a__: Optional[int] = model(**lowercase)[0]
assert list(sample.shape) == [1, 7_68]
a__: List[str] = sample[0, :8].flatten().cpu()
print(lowercase)
a__: Union[str, Any] = torch.tensor(lowercase)
assert torch_all_close(lowercase , lowercase , atol=1e-3)
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowercase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def lowercase ( A_ , A_ , A_=None )-> List[Any]:
'''simple docstring'''
if rng is None:
a : Union[str, Any] = random.Random()
a : Tuple = 1
for dim in shape:
total_dims *= dim
a : Optional[Any] = []
for _ in range(A_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
a : Dict = np.array(A_ , dtype=jnp.intaa ).reshape(A_ )
return output
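# Illustrative usage (shape and vocab size chosen here for demonstration):
# ids_tensor((2, 5), vocab_size=10) -> int32 array of shape (2, 5), entries in [0, 10).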
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowercase ( _a="" ):
snake_case_ : List[str] = tempfile.mkdtemp()
return os.path.join(_a , str(uuid.uuida() ) + suffix )
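# For illustration (the temp directory and UUID are random each call):
# get_new_path(".wav") returns something like "/tmp/tmpab12cd34/9f0c...-....wav".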
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
import argparse

import requests
import torch
from PIL import Image

from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor


def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
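# Illustrative trace of the chained replacements above (example key, not from
# the original file): "blocks.0.attn.proj.weight" first becomes
# "vit.encoder.layer.0.attn.proj.weight" and then
# "vit.encoder.layer.0.attention.output.dense.weight".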
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
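# The slicing above assumes the timm-style fused qkv layout: for a weight of
# shape (3 * dim, dim), rows [0:dim] hold the query projection, rows
# [dim:2*dim] the key projection, and rows [2*dim:3*dim] the value projection.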
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
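# Sketch of the mapping above (illustrative numbers): with max_seq_length=384
# and doc_stride=128, a ~1,000-token context produces several overlapping
# features, and `overflow_to_sample_mapping` points each feature back to the
# single example it came from.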
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
"""Implements the `accelerate test` command."""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
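# Example invocation once installed (the config path is hypothetical):
#   accelerate test --config_file path/to/default_config.yaml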
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(block_out_channels[0] * 4, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
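

# A minimal usage sketch for the module above, assuming it is diffusers'
# FlaxUNet2DConditionModel. The checkpoint id and the shapes below are
# illustrative assumptions, not taken from this file.
if __name__ == "__main__":
    import jax.numpy as jnp
    from diffusers import FlaxUNet2DConditionModel

    unet, params = FlaxUNet2DConditionModel.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="unet", dtype=jnp.float32
    )
    sample = jnp.zeros((1, 4, 64, 64))  # NCHW latents; transposed to NHWC internally
    timesteps = jnp.array([10], dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 77, 768))  # text-encoder hidden states
    out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
    print(out.sample.shape)  # (1, 4, 64, 64)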
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        In legacy mode: no prefix, suffix = [eos, src_lang_code].
        In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
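

# Illustrative round-trip with the tokenizer above. The checkpoint id is the
# published NLLB-200 one; the printed tokens are indicative.
if __name__ == "__main__":
    tokenizer = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    enc = tokenizer("Hello world")
    # In the default (non-legacy) mode the sequence is [src_lang_code, ..., eos]:
    print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))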
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # zero out the region to repaint (the exact slice was lost in this copy)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # region to repaint (the exact slice was lost in this copy)

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
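

# Note on the two-stage design exercised above: Kandinsky 2.2 first runs a
# prior pipeline that maps the text prompt to CLIP image embeddings
# (image_emb / zero_image_emb), and the inpaint decoder then consumes those
# embeddings together with the masked image. The decoder itself never sees
# the prompt text, which is why only embeddings are passed to `pipeline(...)`.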
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: int) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: int,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: int,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
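
    # A quick worked example of the three helpers above (values illustrative):
    print(simple_interest(500.0, 0.05, 10))       # 250.0 = principal * rate * periods
    print(compound_interest(10_000.0, 0.05, 10))  # ~6288.95 = 10_000 * (1.05**10 - 1)
    print(apr_interest(10_000.0, 0.05, 1))        # ~512.67: 5% APR compounded daily for one year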
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
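
# The file above follows the transformers lazy-import pattern: `_import_structure`
# maps each submodule to its public names, and the module object is replaced by a
# `_LazyModule`, so the heavy torch-backed submodule is only imported on first
# attribute access. From a consumer's point of view (module path as in transformers):
#
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#   config = TimeSeriesTransformerConfig()  # triggers the lazy submodule load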
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
def __a ( self ) -> List[str]:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> int:
with self.assertRaises(_a ):
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __a ( self ) -> Optional[int]:
with self.assertRaises(_a ):
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> Optional[int]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def __a ( self ) -> Dict:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> int:
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __a ( self ) -> Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __a ( self ) -> int:
import PIL.Image
lowerCAmelCase_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=_a ) as mock_cast_to_python_objects:
lowerCAmelCase_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
lowerCAmelCase_ , lowerCAmelCase_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , _a )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
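

# Standalone sketch of the round-trip `_check_output` relies on: write a table
# as an Arrow IPC stream into a buffer, then read it back (pyarrow API only).
def _ipc_roundtrip_example():
    table = pa.table({"col_1": ["foo", "bar"], "col_2": [1, 2]})
    sink = pa.BufferOutputStream()
    with pa.ipc.new_stream(sink, table.schema) as stream_writer:
        stream_writer.write_table(table)
    reader = pa.ipc.open_stream(pa.BufferReader(sink.getvalue()))
    assert reader.read_all().to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}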
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: int , __a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A():
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=__a , features=__a ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pa.ipc.open_stream(__a )
lowerCAmelCase_ = f.read_all()
lowerCAmelCase_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__a )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def A(__a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def A(__a: Any ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def A(__a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: List[str] , __a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: Union[str, Any] , __a: List[Any] ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: Dict , __a: Dict ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A():
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
lowerCAmelCase_ = os.path.join(__a , "test.arrow" )
with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(__a , 1 )
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A(__a: Any , __a: str , __a: int ):
lowerCAmelCase_ = pa.array(TypedSequence(__a , optimized_int_type=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A(__a: Tuple , __a: str , __a: Optional[int] ):
# in range
lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowerCAmelCase_ = copy.deepcopy(__a )
lowerCAmelCase_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__a , __a )
lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def A(__a: Dict , __a: List[str] ):
lowerCAmelCase_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=__a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def A(__a: Union[str, Any] ):
lowerCAmelCase_ = "mock://dataset-train.arrow"
with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__a )
def A():
lowerCAmelCase_ = pa.BufferOutputStream()
with ParquetWriter(stream=__a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pq.read_table(__a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def A(__a: Tuple , __a: Dict ):
import PIL.Image
lowerCAmelCase_ = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="png" )
lowerCAmelCase_ = pa.BufferOutputStream()
with ParquetWriter(
stream=__a , features=Features({"image": Image()} ) , embed_local_files=__a ) as writer:
writer.write({"image": image_path} )
writer.finalize()
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pq.read_table(__a )
lowerCAmelCase_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , __a )
with open(__a , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def A():
lowerCAmelCase_ = pa.schema([pa.field("col_1" , pa.string() , nullable=__a )] )
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(stream=__a ) as writer:
writer._build_writer(inferred_schema=__a )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search from `start`; returns the set of visited nodes."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
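
    # The example graph above is connected, so a DFS from any node reaches every node:
    assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}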
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Configuration class to store the configuration of a SEW-D model."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
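

# With the default strides above, `inputs_to_logits_ratio` is the overall
# downsampling factor of the convolutional feature extractor:
if __name__ == "__main__":
    _strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    print(functools.reduce(operator.mul, _strides, 1))  # 320 -> one frame per 320 samples (20 ms at 16 kHz)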
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed

MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of sanity checks on the parsed arguments before starting distillation.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=lowercase , required=lowercase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=lowercase , required=lowercase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=lowercase , choices=["distilbert", "roberta", "gpt2"] , required=lowercase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=lowercase , required=lowercase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=lowercase , type=lowercase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=lowercase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=lowercase , required=lowercase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=lowercase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=lowercase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=lowercase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=lowercase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=lowercase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=lowercase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=lowercase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=lowercase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=lowercase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=lowercase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=lowercase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=lowercase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=lowercase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=lowercase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=lowercase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=lowercase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5e-4 , type=lowercase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1e-6 , type=lowercase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=lowercase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=lowercase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=lowercase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=lowercase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=lowercase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=lowercase , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=lowercase , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=lowercase , default=4000 , help="Checkpoint interval." )
snake_case : str = parser.parse_args()
sanity_checks(lowercase )
# ARGS #
init_gpu_params(lowercase )
set_seed(lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(lowercase ) , lowercase , indent=4 )
git_log(args.dump_path )
snake_case ,snake_case ,snake_case : int = MODEL_CLASSES[args.student_type]
snake_case ,snake_case ,snake_case : int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case : int = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case : Any = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case : List[str] = tokenizer.all_special_tokens.index(lowercase )
snake_case : Optional[int] = tokenizer.all_special_ids[idx]
logger.info(F'Special tokens {special_tok_ids}' )
snake_case : Any = special_tok_ids
snake_case : Dict = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'Loading data from {args.data_file}' )
with open(args.data_file , "rb" ) as fp:
snake_case : Optional[int] = pickle.load(lowercase )
if args.mlm:
logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , "rb" ) as fp:
snake_case : Dict = pickle.load(lowercase )
snake_case : str = np.maximum(lowercase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case : str = 0.0 # do not predict special tokens
snake_case : Dict = torch.from_numpy(lowercase )
else:
snake_case : Tuple = None
snake_case : str = LmSeqsDataset(params=lowercase , data=lowercase )
logger.info("Data loader created." )
# STUDENT #
logger.info(F'Loading student config from {args.student_config}' )
snake_case : str = student_config_class.from_pretrained(args.student_config )
snake_case : Optional[int] = True
if args.student_pretrained_weights is not None:
logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' )
snake_case : Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowercase )
else:
snake_case : Union[str, Any] = student_model_class(lowercase )
if args.n_gpu > 0:
student.to(F'cuda:{args.local_rank}' )
logger.info("Student loaded." )
# TEACHER #
snake_case : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowercase )
if args.n_gpu > 0:
teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowercase , lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowercase , lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case : Optional[Any] = Distiller(
params=lowercase , dataset=lowercase , token_probs=lowercase , student=lowercase , teacher=lowercase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 203
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase__ = 50_0000
lowercase__ , lowercase__ = os.path.split(__file__)
lowercase__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _snake_case ( lowercase__ , **lowercase__ ):
_lowerCamelCase : Union[str, Any] = dataset.map(**lowercase__ )
@get_duration
def _snake_case ( lowercase__ , **lowercase__ ):
_lowerCamelCase : int = dataset.filter(**lowercase__ )
def _snake_case ( ):
_lowerCamelCase : int = {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Tuple = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
_lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(lowercase__ , 'dataset.arrow' ) , lowercase__ , num_examples=lowercase__ )
_lowerCamelCase : Optional[int] = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase__ )
def tokenize(lowercase__ ):
return tokenizer(examples['text'] )
_lowerCamelCase : Dict = map(lowercase__ )
_lowerCamelCase : Union[str, Any] = map(lowercase__ , batched=lowercase__ )
_lowerCamelCase : Dict = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='numpy' ):
_lowerCamelCase : Optional[Any] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='pandas' ):
_lowerCamelCase : str = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='torch' , columns='numbers' ):
_lowerCamelCase : Tuple = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
_lowerCamelCase : Optional[int] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
_lowerCamelCase : Tuple = map(lowercase__ , function=lowercase__ , batched=lowercase__ )
_lowerCamelCase : Optional[Any] = filter(lowercase__ )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase__ , 'wb' ) as f:
f.write(json.dumps(lowercase__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 12
|
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. Now counting_arr[i] tells
    # us how many elements of the collection are <= i + coll_min
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
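    # Worked example (illustrative): counts [2, 0, 1] become [2, 2, 3], i.e. two
    # elements equal coll_min, none equal coll_min + 1, and all three are <= coll_min + 2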
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ ):
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
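# Complexity note: counting sort runs in O(n + k) time and O(n + k) extra space,
# where k = max - min + 1; it is only practical when the value range k is
# comparable to the number of elements n.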
| 12
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if "cls_token" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : str = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE : Any = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE : int = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = orig_state_dict.pop(lowerCamelCase_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = key.split(""".""" )
SCREAMING_SNAKE_CASE : str = int(key_split[1] )
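            # The source checkpoint stores query, key and value as one fused
            # (3 * dim, dim) projection; the slices below peel off q (rows [:dim]),
            # k (rows [dim : 2 * dim]) and v (rows [-dim:]) for the converted layout.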
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE : int = config.decoder_hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = """decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = val[:dim, :]
SCREAMING_SNAKE_CASE : List[str] = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : int = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE : Tuple = val[:dim]
SCREAMING_SNAKE_CASE : List[Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Dict = val[-dim:]
else:
SCREAMING_SNAKE_CASE : Tuple = config.hidden_size
SCREAMING_SNAKE_CASE : List[Any] = """vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE : Optional[int] = val[:dim, :]
SCREAMING_SNAKE_CASE : Dict = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : str = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim]
SCREAMING_SNAKE_CASE : List[Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:]
else:
SCREAMING_SNAKE_CASE : List[Any] = val
return orig_state_dict
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = 10_24
SCREAMING_SNAKE_CASE : Optional[Any] = 40_96
SCREAMING_SNAKE_CASE : Any = 24
SCREAMING_SNAKE_CASE : Optional[int] = 16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE : Dict = 14
SCREAMING_SNAKE_CASE : Union[str, Any] = 12_80
SCREAMING_SNAKE_CASE : Any = 51_20
SCREAMING_SNAKE_CASE : Optional[Any] = 32
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : List[str] = ViTMAEForPreTraining(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE : Tuple = ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
SCREAMING_SNAKE_CASE : Dict = ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE : Any = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323
| 1
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a_ = '<<<<<<< This should probably be modified because it mentions: '
a_ = '=======\n>>>>>>>\n'
a_ = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
a_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
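# Example (illustrative): applying the patterns above, the line
# "text = tfds.features.Text()" is rewritten to "text = datasets.Value('string')"
# before any remaining `tfds.` usage is flagged as an error.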
def __lowercase ( lowerCamelCase : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _lowercase ( snake_case_ ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case : ArgumentParser ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Tuple = parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=snake_case , required=snake_case , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=snake_case , required=snake_case , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=snake_case )
def __init__( self : int , snake_case : str , snake_case : str , *snake_case : Tuple ) -> Any:
"""simple docstring"""
UpperCamelCase_ : List[Any] = get_logger('datasets-cli/converting' )
UpperCamelCase_ : Optional[Any] = tfds_path
UpperCamelCase_ : Union[str, Any] = datasets_directory
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
UpperCamelCase_ : Dict = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
UpperCamelCase_ : int = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
UpperCamelCase_ : Optional[Any] = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
UpperCamelCase_ : Optional[Any] = []
UpperCamelCase_ : str = []
UpperCamelCase_ : List[str] = {}
if os.path.isdir(self._tfds_path ):
UpperCamelCase_ : List[Any] = os.listdir(snake_case )
else:
UpperCamelCase_ : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
UpperCamelCase_ : List[str] = os.path.join(snake_case , snake_case )
UpperCamelCase_ : str = os.path.join(snake_case , snake_case )
if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(snake_case , encoding='utf-8' ) as f:
UpperCamelCase_ : Dict = f.readlines()
UpperCamelCase_ : int = []
UpperCamelCase_ : Any = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : str = []
for line in lines:
UpperCamelCase_ : List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
UpperCamelCase_ : List[Any] = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
UpperCamelCase_ : Optional[Any] = ''
continue
elif "from absl import logging" in out_line:
UpperCamelCase_ : Dict = 'from datasets import logging\n'
elif "getLogger" in out_line:
UpperCamelCase_ : Optional[Any] = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
UpperCamelCase_ : str = True
                    UpperCamelCase_ : str = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + '\n' )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
UpperCamelCase_ : Union[str, Any] = re.sub(snake_case , snake_case , snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
UpperCamelCase_ : Union[str, Any] = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
UpperCamelCase_ : Any = 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
UpperCamelCase_ : Any = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
UpperCamelCase_ : List[Any] = f_name.replace('.py' , '' )
UpperCamelCase_ : int = os.path.join(snake_case , snake_case )
UpperCamelCase_ : int = os.path.join(snake_case , snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(snake_case )
if needs_manual_update:
with_manual_update.append(snake_case )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.writelines(snake_case )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
UpperCamelCase_ : Dict = os.path.basename(snake_case )
UpperCamelCase_ : Optional[Any] = imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(snake_case , snake_case )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 50
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Dict=False ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
                UpperCamelCase_ : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class _lowercase ( snake_case_ ):
def __init__( self : Tuple , snake_case : Optional[int] , snake_case : Optional[Any]=1_3 , snake_case : Optional[Any]=7 , snake_case : Any=True , snake_case : Optional[int]=True , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=9_9 , snake_case : int=3_2 , snake_case : str=3_2 , snake_case : str=2 , snake_case : List[Any]=4 , snake_case : Tuple=3_7 , snake_case : Any="gelu" , snake_case : str=0.1 , snake_case : Tuple=0.1 , snake_case : Optional[Any]=5_1_2 , snake_case : Optional[int]=1_6 , snake_case : List[Any]=2 , snake_case : Dict=0.02 , snake_case : List[str]=3 , snake_case : Any=4 , snake_case : Any=None , ) -> int:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = parent
UpperCamelCase_ : Any = batch_size
UpperCamelCase_ : List[str] = seq_length
UpperCamelCase_ : List[Any] = is_training
UpperCamelCase_ : Optional[Any] = use_input_mask
UpperCamelCase_ : Tuple = use_token_type_ids
UpperCamelCase_ : Optional[int] = use_labels
UpperCamelCase_ : Dict = vocab_size
UpperCamelCase_ : Dict = hidden_size
UpperCamelCase_ : List[str] = num_hidden_layers
UpperCamelCase_ : Tuple = num_attention_heads
UpperCamelCase_ : Optional[int] = intermediate_size
UpperCamelCase_ : int = hidden_act
UpperCamelCase_ : List[str] = hidden_dropout_prob
UpperCamelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_ : Tuple = max_position_embeddings
UpperCamelCase_ : Tuple = type_vocab_size
UpperCamelCase_ : Optional[Any] = type_sequence_label_size
UpperCamelCase_ : Any = initializer_range
UpperCamelCase_ : Tuple = num_labels
UpperCamelCase_ : Tuple = num_choices
UpperCamelCase_ : Tuple = scope
UpperCamelCase_ : Dict = embedding_size
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Optional[Any] = None
if self.use_input_mask:
UpperCamelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : Dict = None
if self.use_labels:
UpperCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ : Union[str, Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : str , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Tuple , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = TFMobileBertModel(config=snake_case )
UpperCamelCase_ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Union[str, Any] = model(snake_case )
UpperCamelCase_ : Optional[Any] = [input_ids, input_mask]
UpperCamelCase_ : List[Any] = model(snake_case )
UpperCamelCase_ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFMobileBertForMaskedLM(config=snake_case )
UpperCamelCase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Any , snake_case : int , snake_case : int , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = TFMobileBertForNextSentencePrediction(config=snake_case )
UpperCamelCase_ : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : List[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : int , snake_case : str , snake_case : str , snake_case : Any , snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFMobileBertForPreTraining(config=snake_case )
UpperCamelCase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Any = model(snake_case )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Dict , snake_case : List[str] , snake_case : str , snake_case : List[str] , snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.num_labels
UpperCamelCase_ : Dict = TFMobileBertForSequenceClassification(config=snake_case )
UpperCamelCase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : List[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.num_choices
UpperCamelCase_ : Dict = TFMobileBertForMultipleChoice(config=snake_case )
UpperCamelCase_ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : List[str] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
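        # Tiling each (batch, seq) input to (batch, num_choices, seq) lets a single
        # forward pass score every candidate answer; the head then compares choices.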
UpperCamelCase_ : Optional[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : Tuple , snake_case : str , snake_case : str , snake_case : Optional[int] , snake_case : str , snake_case : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Any = self.num_labels
UpperCamelCase_ : Optional[Any] = TFMobileBertForTokenClassification(config=snake_case )
UpperCamelCase_ : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Tuple , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = TFMobileBertForQuestionAnswering(config=snake_case )
UpperCamelCase_ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Tuple = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCamelCase_ : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
UpperCamelCase_ : str = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
UpperCamelCase_ : Optional[Any] = TFMobileBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
class _lowercase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Any = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
UpperCamelCase_ : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase_ : List[str] = model(snake_case )[0]
UpperCamelCase_ : Any = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , snake_case )
UpperCamelCase_ : Dict = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-4 )
| 50
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase ):
def __init__( self , **lowercase ) -> Optional[int]:
super().__init__(**lowercase )
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(lowercase )
def __call__( self , lowercase , lowercase = None , **lowercase , ) -> List[str]:
if "text_queries" in kwargs:
lowerCAmelCase = kwargs.pop("""text_queries""" )
if isinstance(lowercase , (str, Image.Image) ):
lowerCAmelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
lowerCAmelCase = image
lowerCAmelCase = super().__call__(lowercase , **lowercase )
return results
def _snake_case ( self , **lowercase ) -> List[str]:
lowerCAmelCase = {}
if "threshold" in kwargs:
lowerCAmelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
lowerCAmelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def _snake_case ( self , lowercase ) -> List[str]:
lowerCAmelCase = load_image(inputs["""image"""] )
lowerCAmelCase = inputs["""candidate_labels"""]
if isinstance(lowercase , lowercase ):
lowerCAmelCase = candidate_labels.split(""",""" )
        lowerCAmelCase = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(lowercase ):
lowerCAmelCase = self.tokenizer(lowercase , return_tensors=self.framework )
lowerCAmelCase = self.image_processor(lowercase , return_tensors=self.framework )
yield {
"is_last": i == len(lowercase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase = model_inputs.pop("""target_size""" )
lowerCAmelCase = model_inputs.pop("""candidate_label""" )
lowerCAmelCase = model_inputs.pop("""is_last""" )
lowerCAmelCase = self.model(**lowercase )
lowerCAmelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def _snake_case ( self , lowercase , lowercase=0.1 , lowercase=None ) -> List[str]:
lowerCAmelCase = []
for model_output in model_outputs:
lowerCAmelCase = model_output["""candidate_label"""]
lowerCAmelCase = BaseModelOutput(lowercase )
lowerCAmelCase = self.image_processor.post_process_object_detection(
outputs=lowercase , threshold=lowercase , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
lowerCAmelCase = outputs["""scores"""][index].item()
lowerCAmelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
lowerCAmelCase = {"""score""": score, """label""": label, """box""": box}
results.append(lowercase )
        lowerCAmelCase = sorted(lowercase , key=lambda x : x["score"] , reverse=True )
if top_k:
lowerCAmelCase = results[:top_k]
return results
def _snake_case ( self , lowercase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
        xmin , ymin , xmax , ymax = box.int().tolist()
lowerCAmelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
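# Usage sketch (the pipeline factory call and checkpoint name are assumptions,
# not fixed by this file):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(image, candidate_labels=["cat", "remote control"])
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}]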
| 46
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
_SCREAMING_SNAKE_CASE = Features({'text': Value('string' )} )
_SCREAMING_SNAKE_CASE = Features({} )
_SCREAMING_SNAKE_CASE = "text"
@property
def _snake_case ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
| 46
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__( a_ , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = CTRLTokenizer
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Optional[int] = False
def snake_case__ ( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
A__ = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
A__ = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
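        # Each merge line joins one symbol pair: 'a p' -> 'ap', then 'ap t</w>' -> 'apt</w>',
        # so a word-final "apt" collapses into the single vocabulary entry 'apt'.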
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCAmelCase ) )
def snake_case__ ( self ,**__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ) -> Optional[int]:
A__ = 'adapt react readapt apt'
A__ = 'adapt react readapt apt'
return input_text, output_text
def snake_case__ ( self ) -> Optional[Any]:
A__ = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
A__ = 'adapt react readapt apt'
A__ = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
A__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) ,__UpperCAmelCase )
| 350
|
"""simple docstring"""
__lowerCamelCase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__lowerCamelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__lowerCamelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 154
| 0
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" ,[
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] ,)
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" ,"w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" ,"w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" ,"w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
A_ = DatasetInfosDict.from_directory(__lowercase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" ,[
DatasetInfo(),
DatasetInfo(
description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,),
] ,)
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : DatasetInfo ):
"""simple docstring"""
A_ = str(__lowercase )
dataset_info.write_to_directory(__lowercase )
A_ = DatasetInfo.from_directory(__lowercase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__lowercase ,"dataset_info.json" ) )
def __snake_case ( ):
"""simple docstring"""
A_ = DatasetInfo(
description="foo" ,citation="bar" ,homepage="https://foo.bar" ,license="CC0" ,features=Features({"a": Value("int32" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train", "num_examples": 42}] ,download_checksums={} ,download_size=1337 ,post_processing_size=442 ,dataset_size=1234 ,size_in_bytes=1337 + 442 + 1234 ,)
A_ = dataset_info._to_yaml_dict()
assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) )
A_ = yaml.safe_dump(__lowercase )
A_ = yaml.safe_load(__lowercase )
assert dataset_info_yaml_dict == reloaded
def __snake_case ( ):
"""simple docstring"""
A_ = DatasetInfo()
A_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" ,[
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,)
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] ,)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : DatasetInfosDict ):
"""simple docstring"""
A_ = str(__lowercase )
dataset_infos_dict.write_to_directory(__lowercase )
A_ = DatasetInfosDict.from_directory(__lowercase )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
A_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
A_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__lowercase ,"README.md" ) )
| 312
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__SCREAMING_SNAKE_CASE :List[Any] = None
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE :List[Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
__SCREAMING_SNAKE_CASE :Optional[int] = '''▁'''
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
_lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = AlbertTokenizer
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is kept in the raw text, so there should be a match in a non-normalized sentence.
_UpperCAmelCase = (
AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
if isinstance(snake_case_ , snake_case_ )
else mask_token
)
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
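        # Worked example (illustrative): for a pair (A, B) this returns
        # len([CLS] + A + [SEP]) * [0] + len(B + [SEP]) * [1], so segment id 0
        # covers the first sequence and its delimiters and 1 covers the second.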
def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
| 22
| 0
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase_ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase_ = concatenate_datasets
lowerCamelCase_ = DownloadConfig
lowerCamelCase_ = DownloadManager
lowerCamelCase_ = DownloadMode
lowerCamelCase_ = DownloadConfig
lowerCamelCase_ = DownloadMode
lowerCamelCase_ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 253
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''table-transformer'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Optional[Any]=100 , lowerCAmelCase_ : Optional[int]=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=0.0_2 , lowerCAmelCase_ : Any=1.0 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Dict="sine" , lowerCAmelCase_ : Optional[Any]="resnet50" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : List[Any]=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> Union[str, Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = backbone_config.get("model_type" )
UpperCAmelCase_ : str = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = None, None, None
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : Optional[Any] = num_queries
UpperCAmelCase_ : List[str] = d_model
UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = encoder_layers
UpperCAmelCase_ : List[str] = encoder_attention_heads
UpperCAmelCase_ : int = decoder_ffn_dim
UpperCAmelCase_ : int = decoder_layers
UpperCAmelCase_ : Optional[int] = decoder_attention_heads
UpperCAmelCase_ : List[str] = dropout
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : Union[str, Any] = activation_dropout
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : int = init_std
UpperCAmelCase_ : Any = init_xavier_std
UpperCAmelCase_ : Union[str, Any] = encoder_layerdrop
UpperCAmelCase_ : Dict = decoder_layerdrop
UpperCAmelCase_ : Union[str, Any] = encoder_layers
UpperCAmelCase_ : Any = auxiliary_loss
UpperCAmelCase_ : List[str] = position_embedding_type
UpperCAmelCase_ : Dict = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Tuple = dilation
# Hungarian matcher
UpperCAmelCase_ : Optional[Any] = class_cost
UpperCAmelCase_ : List[Any] = bbox_cost
UpperCAmelCase_ : Optional[int] = giou_cost
# Loss coefficients
UpperCAmelCase_ : Optional[int] = mask_loss_coefficient
UpperCAmelCase_ : List[str] = dice_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : Dict = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.d_model
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 12
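# Usage note (sketch): thanks to attribute_map above,
# TableTransformerConfig(num_attention_heads=8) lands on encoder_attention_heads;
# the ONNX config exposes pixel_values and pixel_mask with a dynamic batch axis
# (and dynamic height/width for pixel_values).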
| 253
| 1
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
UpperCAmelCase_ = 500_000
UpperCAmelCase_ , UpperCAmelCase_ = os.path.split(__file__)
UpperCAmelCase_ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def lowerCamelCase__ ( A__ : datasets.Dataset , **A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = dataset.map(**A__ )
@get_duration
def lowerCamelCase__ ( A__ : datasets.Dataset , **A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = dataset.filter(**A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
__lowerCamelCase = generate_example_dataset(
os.path.join(A__ , """dataset.arrow""" ) , A__ , num_examples=A__ )
__lowerCamelCase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=A__ )
def tokenize(A__ : str ):
return tokenizer(examples["""text"""] )
__lowerCamelCase = map(A__ )
__lowerCamelCase = map(A__ , batched=A__ )
__lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ )
with dataset.formatted_as(type="""numpy""" ):
__lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ )
with dataset.formatted_as(type="""pandas""" ):
__lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
__lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
__lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ )
__lowerCamelCase = map(A__ , function=A__ , batched=A__ )
__lowerCamelCase = filter(A__ )
    # Activate later when tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(A__ , """wb""" ) as f:
f.write(json.dumps(A__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
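
    # Follow-up sketch (added): the benchmark serializes its timing dict to
    # the JSON path built with os.path.join(...) above (its de-obfuscated
    # name is assumed to be RESULTS_FILE_PATH). The timings can then be
    # inspected slowest-first:
    #
    #     with open(RESULTS_FILE_PATH) as f:
    #         times = json.load(f)
    #     for name, seconds in sorted(times.items(), key=lambda kv: kv[1], reverse=True):
    #         print(f"{seconds:8.3f}s  {name}")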
| 12
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Optional[int]=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int , A__ : Any , A__ : Union[str, Any] , A__ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
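

# A small runnable sketch (added, not in the original module) of the
# rank-local filename scheme the LOCAL_STATE_DICT branches above use;
# MODEL_NAME is the same constant imported at the top of this module.
def _local_shard_filename(model_index: int, rank: int) -> str:
    # rank-local shards: "<name>_rank<r>.bin" for the first model,
    # "<name>_<i>_rank<r>.bin" for subsequent ones
    if model_index == 0:
        return f"{MODEL_NAME}_rank{rank}.bin"
    return f"{MODEL_NAME}_{model_index}_rank{rank}.bin"


assert _local_shard_filename(0, 3) == f"{MODEL_NAME}_rank3.bin"
assert _local_shard_filename(2, 0) == f"{MODEL_NAME}_2_rank0.bin"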
| 12
| 1
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a: list, start: int, end: int) -> int:
    """Sort a[start : end + 1] in place; return the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a: list, start: int, end: int) -> tuple[int, int]:
    """Partition a[start : end + 1] around a random pivot; return (pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal"
    " distribution is :"
)
print(z)
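
# Extra sanity check (added): sort a small fixed list with the same routine
# and verify against the built-in `sorted`; the return value is the count of
# comparisons made while partitioning.
sample = [3, 1, 4, 1, 5, 9, 2, 6]
comparison_count = _in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == sorted([3, 1, 4, 1, 5, 9, 2, 6])
print(f"sorted 8 fixed elements with {comparison_count} comparisons")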
| 274
|
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """
    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
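    # A small composition check (added): NOR is functionally complete, so NOT
    # can be built by tying both inputs together: NOT(x) == nor_gate(x, x).
    assert nor_gate(0, 0) == 1 and nor_gate(1, 1) == 0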
| 274
| 1
|
from __future__ import annotations
class Node:
    """A binary tree node holding an integer and optional children."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
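    # Extra spot checks (added): a lone leaf is a full binary tree, while a
    # node with exactly one child is not.
    lopsided = Node(1)
    lopsided.left = Node(2)
    assert is_full_binary_tree(Node(3))
    assert not is_full_binary_tree(lopsided)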
| 50
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : str=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=9 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : int=8 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.0_0_2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = encoder_seq_length
lowerCamelCase__ : int = decoder_seq_length
# For common tests
lowerCamelCase__ : List[str] = self.decoder_seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : List[Any] = use_attention_mask
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = d_ff
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : Any = dropout_rate
lowerCamelCase__ : Any = initializer_factor
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : List[str] = pad_token_id
lowerCamelCase__ : List[str] = decoder_start_token_id
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = decoder_layers
def A_ ( self : List[Any] ) -> int:
return TaConfig.from_pretrained('google/umt5-base' )
def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
if attention_mask is None:
lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = self.get_config()
lowerCamelCase__ : Tuple = config.num_attention_heads
lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, input_dict
def A_ ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Optional[int] ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict , ) -> str:
lowerCamelCase__ : Dict = UMTaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Optional[int] = model(
input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Any = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
lowerCamelCase__ : Dict = result.last_hidden_state
lowerCamelCase__ : Any = result.past_key_values
lowerCamelCase__ : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
lowerCamelCase__ : List[Any] = UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
# first forward pass
lowerCamelCase__ : Tuple = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
lowerCamelCase__ : int = model(UpperCAmelCase , use_cache=UpperCAmelCase )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 )
lowerCamelCase__ , lowerCamelCase__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCamelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase )['last_hidden_state']
# select random slice
lowerCamelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = UMTaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).half().eval()
lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(UpperCAmelCase ).any().item() )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase__ = [0.8, 0.9]
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def A_ ( self : Tuple ) -> int:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Tuple = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def A_ ( self : Tuple ) -> Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase )
def A_ ( self : List[Any] ) -> str:
lowerCamelCase__ : int = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Any = config_and_inputs[0]
lowerCamelCase__ : Any = UMTaForConditionalGeneration(UpperCAmelCase ).eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Tuple = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase , head_masking.items() ):
lowerCamelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCamelCase__ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase )
lowerCamelCase__ : Tuple = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , **UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCamelCase__ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCAmelCase , legacy=UpperCAmelCase )
lowerCamelCase__ : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_tensors='pt' , padding=UpperCAmelCase ).input_ids
# fmt: off
lowerCamelCase__ : Any = torch.tensor(
[
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = model.generate(input_ids.to(UpperCAmelCase ) )
lowerCamelCase__ : List[Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 50
| 1
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Stretch an image's gray levels using its cumulative histogram."""

    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image: str) -> None:
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()


if __name__ == "__main__":
    # note: dirname, not basename — the input image lives next to this script
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
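
    # A numpy-only sketch (added) of the core idea the class implements:
    # build the cumulative distribution of gray levels and use it as a
    # lookup table — classic histogram equalization.
    hist = np.bincount(stretcher.img.ravel(), minlength=256).astype(np.float64)
    cdf = np.cumsum(hist / hist.sum())
    lookup = np.rint((256 - 1) * cdf).astype(np.uint8)
    equalized = lookup[stretcher.img]
    assert equalized.shape == stretcher.img.shape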
| 350
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    """Configuration class for the Decision Transformer model."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 218
| 0
|
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """
    Encode bytes as an uppercase base16 (hex) string.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase base16 (hex) string back to bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
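    # A tiny round-trip spot check (added): encoding then decoding returns
    # the original bytes.
    assert base16_decode(base16_encode(b"Hello World!")) == b"Hello World!"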
| 63
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
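    # Usage note (added): with the `_LazyModule` indirection above, importing
    # this package stays cheap — touching `FNetConfig` only loads the
    # configuration module, while touching `FNetModel` triggers the
    # torch-gated branch declared in `_import_structure`.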
| 154
| 0
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A : Any = '▁'
__A : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Any = BertGenerationTokenizer
_UpperCamelCase:List[str] = False
_UpperCamelCase:List[Any] = True
def _snake_case ( self )-> Optional[int]:
super().setUp()
lowerCamelCase_ =BertGenerationTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self )-> Any:
lowerCamelCase_ ="""<s>"""
lowerCamelCase_ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1002 )
def _snake_case ( self )-> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self )-> Any:
lowerCamelCase_ =BertGenerationTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [285, 46, 10, 170, 382] , )
lowerCamelCase_ =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase_ =tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase_ =tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case ( self )-> str:
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ ="""Hello World!"""
lowerCamelCase_ =[1_8536, 2260, 101]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase_ =[
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@require_torch
@slow
def _snake_case ( self )-> Any:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase_ =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase_ =""" """.join(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.big_tokenizer.encode_plus(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , return_token_type_ids=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =BertGenerationConfig()
lowerCamelCase_ =BertGenerationEncoder(_SCREAMING_SNAKE_CASE )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_SCREAMING_SNAKE_CASE )
model(**_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )-> int:
# fmt: off
lowerCamelCase_ ={"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 367
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__A : Optional[int] = pd.read_csv('sample_data.csv', header=None)
__A : Optional[Any] = df.shape[:1][0]
# If you're using some other dataset, input the target column
__A : Tuple = df.iloc[:, 1:2]
__A : Tuple = actual_data.values.reshape(len_data, 1)
__A : str = MinMaxScaler().fit_transform(actual_data)
__A : List[str] = 10
__A : Any = 5
__A : Optional[Any] = 20
__A : List[str] = len_data - periods * look_back
__A : str = actual_data[:division]
__A : int = actual_data[division - look_back :]
__A, __A : List[str] = [], []
__A, __A : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__A : List[Any] = np.array(train_x)
__A : Tuple = np.array(test_x)
__A : Any = np.array([list(i.ravel()) for i in train_y])
__A : List[Any] = np.array([list(i.ravel()) for i in test_y])
__A : Union[str, Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
__A : Tuple = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
__A : Optional[int] = model.predict(x_test)
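
# Hedged follow-up sketch (added): the network was trained on MinMax-scaled
# prices, so invert the scaling before reading predictions in price units
# (`pred` is the assumed name of the `model.predict(x_test)` result above):
#
#     scaler = MinMaxScaler().fit(actual_data)  # refit exactly as above
#     unscaled = scaler.inverse_transform(pred.reshape(-1, 1)).reshape(pred.shape)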
| 49
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase : Tuple = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def A_ ( a , a , a=8 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
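

# Quick check (added): each dimension becomes ceil(dim / scale_factor**2) *
# scale_factor, so with the default scale_factor=8 (and assuming the helper
# above is named `get_new_h_w`, as the call inside the pipeline indicates):
#
#     get_new_h_w(768, 768)  ->  (96, 96)  # 768 is already a multiple of 64
#     get_new_h_w(700, 700)  ->  (88, 88)  # 700 rounds up to 11 * 64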
class _A ( __magic_name__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if latents is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
SCREAMING_SNAKE_CASE_ : str = latents.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : Dict = text_inputs.input_ids
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : int = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
SCREAMING_SNAKE_CASE_ : Optional[int] = text_input_ids.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
SCREAMING_SNAKE_CASE_ : List[Any] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Dict = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
SCREAMING_SNAKE_CASE_ : Any = negative_prompt
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : List[str] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ : Dict = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE_ : int = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = uncond_text_encoder_hidden_states.shape[1]
SCREAMING_SNAKE_CASE_ : int = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
SCREAMING_SNAKE_CASE_ : Dict = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
SCREAMING_SNAKE_CASE_ : int = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
SCREAMING_SNAKE_CASE_ : int = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE_ : str = torch.device(f"cuda:{gpu_id}" )
SCREAMING_SNAKE_CASE_ : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE_ : int = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        """Generate images from a text prompt and precomputed image embeddings."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
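# A minimal usage sketch (illustrative only -- the checkpoint names and the prior
# pipeline are assumptions, not shown in this file): the decoder pipeline above
# expects image embeddings produced separately by a prior pipeline.
#
#   from diffusers import KandinskyPriorPipeline, KandinskyPipeline
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   image_embeds, negative_image_embeds = prior("a red cat").to_tuple()
#   image = pipe(
#       "a red cat", image_embeds=image_embeds, negative_image_embeds=negative_image_embeds
#   ).images[0]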
| 253
|
lowerCAmelCase : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 253
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 263
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish) expression given as a list of tokens.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            # the first pop yields the right-hand operand
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated towards zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
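# A small convenience sketch (hypothetical helper, not part of the original
# module): tokenize a whitespace-separated postfix string before evaluating it.
def evaluate_postfix_string(expression: str) -> int:
    # "5 3 - 4 *" encodes (5 - 3) * 4 and evaluates to 8
    return evaluate_postfix(expression.split())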
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263
| 1
|
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of a body from its mass and velocity.

    >>> kinetic_energy(10, 10)
    500.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
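# Worked example (values are illustrative): a 1500 kg car moving at 20 m/s has
# kinetic_energy(1500, 20) == 0.5 * 1500 * 20**2 == 300000.0 joules.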
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 274
|
from math import ceil
def solution(n: int = 1_001) -> int:
    """
    Sum of the numbers on the diagonals of an n x n number spiral
    (Project Euler problem 28).
    """
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corners of the i-th ring around the centre
        total = total + 4 * odd**2 - 6 * even
    return total
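# Sanity check (illustrative): for a 5 x 5 spiral the diagonal numbers are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, so solution(5) returns 101.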
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 274
| 1
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def parse_bool(string: str) -> bool:
    """Parse the strings "True" / "False" into booleans for argparse."""
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")
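# Example (illustrative): parse_bool("True") -> True, parse_bool("False") -> False;
# any other string raises ValueError, which makes argparse report a usage error.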
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
args = parser.parse_args()

controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 204
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
    @slow
    def test_small_integration_test(self):
        """Integration test: the loss on a tiny example matches a reference value."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 204
| 1
|
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path: str, openai_config_file: str, pytorch_dump_folder_path: str):
    """Convert an OpenAI GPT TensorFlow checkpoint into a PyTorch model."""
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
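# Example invocation (script name and paths are illustrative, not from the original file):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf \
#       --pytorch_dump_folder_path ./openai-gpt-pt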
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 218
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """One process per list element; repeatedly swaps values with its neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` using one process per element (odd-even transposition sort)."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
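# Illustrative trace: main() spawns one process per element of [10, 9, ..., 1];
# after the ten swap rounds the parent reads the values back through result_pipe,
# printing [1, 2, ..., 10]. The Lock only serializes pipe access between rounds.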
if __name__ == "__main__":
main()
| 218
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a__ ( unittest.TestCase ):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        # gelu_10 behaves like gelu but clips the output at 10
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")

        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1

        act2 = get_activation("gelu")
        # `get_activation` should return a fresh instance, so the attribute set
        # on `act1` must not leak onto `act2`.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 180
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : int = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 180
| 1
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read the contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read the contents of a gzip file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read the contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read the contents of an XZ (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read the contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 184
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case :str = logging.get_logger(__name__)
__snake_case :int = {'''vocab_file''': '''vocab.txt'''}
__snake_case :List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__snake_case :List[str] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__snake_case :Optional[int] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 49
| 0
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : List[str] = self.get_dummy_inputs()
lowerCamelCase__ : Optional[int] = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase__ : Union[str, Any] = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCamelCase__ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs()
lowerCamelCase__ : List[Any] = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCamelCase__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Tuple = self.get_dummy_inputs()
lowerCamelCase__ : str = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase__ : str = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCamelCase__ : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Tuple = self.get_dummy_inputs()
lowerCamelCase__ : Union[str, Any] = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase__ : Union[str, Any] = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCamelCase__ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = self.get_dummy_inputs()
lowerCamelCase__ : Any = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase__ : Tuple = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCamelCase__ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Tuple = self.get_dummy_inputs()
lowerCamelCase__ : int = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase__ : Dict = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : List[str] = self.get_dummy_inputs()
lowerCamelCase__ : Any = 3 * [inputs['''prompt''']]
# forward
lowerCamelCase__ : List[str] = pipe(**UpperCamelCase__ )
lowerCamelCase__ : Tuple = output.images[0, -3:, -3:, -1]
lowerCamelCase__ : Tuple = self.get_dummy_inputs()
lowerCamelCase__ : List[Any] = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase__ : List[str] = pipe.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""np""" , )
lowerCamelCase__ : Tuple = text_inputs['''input_ids''']
lowerCamelCase__ : int = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
lowerCamelCase__ : List[Any] = prompt_embeds
# forward
lowerCamelCase__ : Optional[Any] = pipe(**UpperCamelCase__ )
lowerCamelCase__ : List[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = self.get_dummy_inputs()
lowerCamelCase__ : Any = 3 * ['''this is a negative prompt''']
lowerCamelCase__ : Any = negative_prompt
lowerCamelCase__ : Tuple = 3 * [inputs['''prompt''']]
# forward
lowerCamelCase__ : List[str] = pipe(**UpperCamelCase__ )
lowerCamelCase__ : Any = output.images[0, -3:, -3:, -1]
lowerCamelCase__ : Tuple = self.get_dummy_inputs()
lowerCamelCase__ : Optional[Any] = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase__ : str = []
for p in [prompt, negative_prompt]:
lowerCamelCase__ : int = pipe.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""np""" , )
lowerCamelCase__ : int = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
lowerCamelCase__ : List[str] = embeds
# forward
lowerCamelCase__ : List[Any] = pipe(**UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
lowerCamelCase__ : List[str] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
lowerCamelCase__ : Union[str, Any] = output.images
lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : Tuple = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[Any] = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
lowerCamelCase__ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : int = '''open neural network exchange'''
lowerCamelCase__ : int = np.random.RandomState(0 )
lowerCamelCase__ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
lowerCamelCase__ : Optional[Any] = output.images
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : str = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Tuple = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
lowerCamelCase__ : str = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : List[str] = '''open neural network exchange'''
lowerCamelCase__ : Tuple = np.random.RandomState(0 )
lowerCamelCase__ : int = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : str = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = 0
def test_callback_fn(UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: np.ndarray ) -> None:
lowerCamelCase__ : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase__ : str = latents[0, -3:, -3:, -1]
lowerCamelCase__ : Any = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase__ : List[Any] = latents[0, -3:, -3:, -1]
lowerCamelCase__ : int = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : Any = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Tuple = '''Andromeda galaxy in a bottle'''
lowerCamelCase__ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=UpperCamelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert pipe.safety_checker is None
lowerCamelCase__ : List[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase__ : Tuple = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 367
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 129
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds a tiny ViT config and random inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests applied to the ViT model classes."""

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 263
|
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """
    Return the bitwise OR of two non-negative integers as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
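# Quick cross-check (illustrative) against Python's built-in operator:
# binary_or(25, 32) == bin(25 | 32) == "0b111001"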
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ :Union[str, Any] = logging.get_logger(__name__)
A_ :Tuple = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 354
|
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
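# Example (illustrative): the first six primes are 2, 3, 5, 7, 11, 13,
# so solution(6) returns 13.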
if __name__ == "__main__":
print(f"{solution() = }")
| 245
| 0
|
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """
    Get the rotated (affine-transformed) version of the image.
    """
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 204
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 204
| 1
|
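# The `sys.modules[__name__] = _LazyModule(...)` line is what defers the heavy
# framework imports: the real submodule is only imported when one of its names
# is first accessed. A minimal sketch of the same idea via PEP 562 module-level
# __getattr__ (a simplified stand-in, not the actual _LazyModule implementation):
import importlib

_lazy_import_structure = {"heavy_module": ["HeavyClass"]}  # hypothetical submodule


def __getattr__(name):
    # Resolve e.g. "HeavyClass" -> <pkg>.heavy_module.HeavyClass on first access.
    for module_name, symbols in _lazy_import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")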
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract patches from an image tensor of shape (channels, height, width)."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    """Render text on a white background and return it as a PIL image."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    """Renders the input text as a header on the input image."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Extract flattened and padded patches from an image."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 354
|
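# The scale computation in `extract_flattened_patches` picks the largest resize
# for which the grid of patch_height x patch_width patches stays within
# max_patches. A worked numeric check of that formula (the image size below is a
# hypothetical example):
import math

max_patches, patch_h, patch_w = 2048, 16, 16
image_h, image_w = 480, 640

scale = math.sqrt(max_patches * (patch_h / image_h) * (patch_w / image_w))
rows = max(min(math.floor(scale * image_h / patch_h), max_patches), 1)
cols = max(min(math.floor(scale * image_w / patch_w), max_patches), 1)

print(rows, cols, rows * cols)  # 39 52 2028 -- within the 2048 budget; the rest is padding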
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 144
| 0
|
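# The dynamic-batching property asserted above (no batch much over max_tokens
# source tokens) can be reproduced with a simple greedy length-bucketing scheme.
# A sketch of the underlying idea only -- not the repo's make_dynamic_sampler:
def greedy_token_batches(lengths: list, max_tokens: int) -> list:
    """Greedily pack indices so batch_size * max_len_in_batch <= max_tokens."""
    order = sorted(range(len(lengths)), key=lambda i: -lengths[i])  # longest first
    batches, current = [], []
    for idx in order:
        candidate = current + [idx]
        if len(candidate) * max(lengths[i] for i in candidate) <= max_tokens:
            current = candidate
        else:
            batches.append(current)
            current = [idx]
    if current:
        batches.append(current)
    return batches


print(greedy_token_batches([12, 3, 7, 9, 2, 5], max_tokens=20))  # [[0], [3, 2], [5, 1, 4]]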
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| 180
|
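# The all_reduce call is the actual connectivity probe: every rank contributes a
# ones-tensor and each rank ends up with the sum across the world. The semantics
# can be checked in a single process with the gloo backend (addr/port below are
# illustrative):
import os

import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)
assert t.item() == dist.get_world_size()  # one 1.0 summed per rank
dist.destroy_process_group()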
from math import isqrt
def is_prime(number: int) -> bool:
    """Checks primality by trial division up to isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 180
| 1
|
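# The `prime_candidate += 6 * cube_index` step walks the centered hexagonal
# numbers 3n^2 + 3n + 1 (7, 19, 37, 61, ...), whose consecutive terms differ by
# 6(n + 1). A quick verification of that recurrence:
def centered_hexagonal(n: int) -> int:
    return 3 * n * n + 3 * n + 1


assert [centered_hexagonal(n) for n in range(1, 5)] == [7, 19, 37, 61]
for n in range(1, 10):
    assert centered_hexagonal(n + 1) - centered_hexagonal(n) == 6 * (n + 1)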
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 362
|
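# A short usage sketch of the config above (assuming the released
# transformers.InformerConfig): one static categorical feature shows how
# `cardinality` drives the derived `embedding_dimension`:
from transformers import InformerConfig

config = InformerConfig(
    prediction_length=24,
    num_static_categorical_features=1,
    cardinality=[366],
)
print(config.embedding_dimension)  # [50] == min(50, (366 + 1) // 2)
print(config.context_length)       # 24 -- falls back to prediction_length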
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 296
| 0
|
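# For reference, `_re_copy_warning` matches comment lines of the form below; the
# three capture groups hold the indent, the object path, and the optional
# replace pattern (the example line itself is hypothetical):
import re

_re_copy = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")

line = "    # Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->MyUNet"
indent, object_name, replace_pattern = _re_copy.search(line).groups()
print(repr(indent))     # '    '
print(object_name)      # 'models.unet_2d.UNet2DModel'
print(replace_pattern)  # 'with UNet2D->MyUNet'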
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 80
|
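# At the cutoff frequency a Butterworth biquad should sit near -3 dB. A quick
# probe of make_lowpass by evaluating H(z) = (b0 + b1/z + b2/z^2) / (a0 + a1/z + a2/z^2)
# on the unit circle. This assumes IIRFilter exposes `a_coeffs`/`b_coeffs`
# lists as in the TheAlgorithms implementation, and runs in the same module as
# make_lowpass above:
from cmath import exp, pi


def biquad_gain(b, a, frequency: float, samplerate: int) -> float:
    z = exp(2j * pi * frequency / samplerate)  # point on the unit circle
    return abs((b[0] + b[1] / z + b[2] / z**2) / (a[0] + a[1] / z + a[2] / z**2))


filt = make_lowpass(1_000, 48_000)
print(f"{biquad_gain(filt.b_coeffs, filt.a_coeffs, 1_000, 48_000):.3f}")  # ~0.707, about -3 dB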
def solution() -> int:
    """Product of the digits d1 * d10 * d100 * ... * d1000000 of the
    Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
| 129
| 0
|
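# Sanity check on the indexing above: the Champernowne digit string starts
# "123456789101112...", so constant[0] is d1 = 1 and constant[9] is d10, the
# first digit of "10":
digits = "".join(str(i) for i in range(1, 16))  # "123456789101112131415"
assert digits[0] == "1"   # d1
assert digits[9] == "1"   # d10, first digit of 10
assert digits[11] == "1"  # d12, first digit of 11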
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return set of symbol pairs in a word.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 31
|
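# A toy walk-through of the BPE machinery above with a hand-built merge table
# (the merges here are illustrative, not from a real merges.txt; run in the
# same module so get_pairs is in scope):
bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical two-rule table

word = tuple("low")
pairs = get_pairs(word)  # {("l", "o"), ("o", "w")}
best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
print(best)  # ('l', 'o') -- the lowest-ranked (earliest-learned) merge wins
# applying ("l", "o") and then ("lo", "w") collapses "low" into a single token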
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Pure Python implementation of the P-Series algorithm."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 31
| 1
|
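# Example: the first five terms of the P-series with p = 2, i.e. sum(1 / n^2):
print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']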
'''simple docstring'''
from manim import *
class CheckpointScene(Scene):  # descriptive name; the original class name is not recoverable here
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = Text("CPU" , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = Text("GPU" , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase__ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = Text("Model" , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase__ )
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(UpperCAmelCase__ ):
rect.set_stroke(UpperCAmelCase__ )
lowerCamelCase_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCAmelCase__ , buff=0.0 )
self.add(UpperCAmelCase__ )
model_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__ , *UpperCAmelCase__ , *UpperCAmelCase__ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = Text("Loaded Checkpoint" , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase__ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(UpperCAmelCase__ ):
lowerCamelCase_ = fill.copy().set_fill(UpperCAmelCase__ , opacity=0.7 )
target.move_to(UpperCAmelCase__ )
ckpt_arr.append(UpperCAmelCase__ )
lowerCamelCase_ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__ , *UpperCAmelCase__ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase_ = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase__ )
lowerCamelCase_ = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowerCamelCase_ = Text("Disk" , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase__ , run_time=3 ) , Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) )
lowerCamelCase_ = []
for i, rect in enumerate(UpperCAmelCase__ ):
lowerCamelCase_ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(FadeOut(UpperCAmelCase__ ) )
lowerCamelCase_ = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , *UpperCAmelCase__ ) , )
self.wait()
| 55
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : Tuple = spark.range(10 ).repartition(2 )
SCREAMING_SNAKE_CASE : Any = [1, 0]
SCREAMING_SNAKE_CASE : Dict = _generate_iterable_examples(_A , _A ) # Reverse the partitions.
SCREAMING_SNAKE_CASE : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_A , _A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : List[str] = spark.range(10 ).repartition(1 )
SCREAMING_SNAKE_CASE : Optional[Any] = SparkExamplesIterable(_A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_A ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> Any:
SCREAMING_SNAKE_CASE : Tuple = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : Any = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
SCREAMING_SNAKE_CASE : int = _get_expected_row_ids_and_row_dicts_for_partition_order(_A , [2, 1, 0] )
SCREAMING_SNAKE_CASE : Any = SparkExamplesIterable(_A ).shuffle_data_sources(_A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_A ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> str:
SCREAMING_SNAKE_CASE : Dict = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : Optional[Any] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
SCREAMING_SNAKE_CASE : str = SparkExamplesIterable(_A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_A , [0, 2] )
for i, (row_id, row_dict) in enumerate(_A ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
SCREAMING_SNAKE_CASE : int = SparkExamplesIterable(_A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(_A , [1, 3] )
for i, (row_id, row_dict) in enumerate(_A ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : str = spark.range(100 ).repartition(1 )
SCREAMING_SNAKE_CASE : Any = Spark(_A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
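# Illustrative only: a minimal end-to-end use of the helper above on a tiny
# DataFrame. The session/variable names here are placeholders, not part of the
# original test module.
def _example_partition_order_check() -> None:
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(4).repartition(2)
    expected = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 1])
    # Each entry pairs a "partitionid_rowindex" id with the row rendered as a dict.
    assert all(isinstance(row_id, str) and isinstance(row, dict) for row_id, row in expected)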
| 245
| 0
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=sys.maxsize ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''bilinear'''
__lowercase = max_size
__lowercase = short_edge_length
def __call__(self ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = []
for img in imgs:
__lowercase , __lowercase = img.shape[:2]
# later: provide list and randomly choose index for resize
__lowercase = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
if size == 0:
return img
__lowercase = size * 1.0 / min(_lowerCamelCase ,_lowerCamelCase )
if h < w:
__lowercase , __lowercase = size, scale * w
else:
__lowercase , __lowercase = scale * h, size
if max(_lowerCamelCase ,_lowerCamelCase ) > self.max_size:
__lowercase = self.max_size * 1.0 / max(_lowerCamelCase ,_lowerCamelCase )
__lowercase = newh * scale
__lowercase = neww * scale
__lowercase = int(neww + 0.5 )
__lowercase = int(newh + 0.5 )
            if img.dtype == np.uint8:
__lowercase = Image.fromarray(_lowerCamelCase )
__lowercase = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
__lowercase = np.asarray(_lowerCamelCase )
else:
__lowercase = img.permute(2 ,0 ,1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
__lowercase = nn.functional.interpolate(
_lowerCamelCase ,(newh, neww) ,mode=self.interp_method ,align_corners=_lowerCamelCase ).squeeze(0 )
img_augs.append(_lowerCamelCase )
return img_augs
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
__lowercase = cfg.INPUT.FORMAT
__lowercase = cfg.SIZE_DIVISIBILITY
__lowercase = cfg.PAD_VALUE
__lowercase = cfg.INPUT.MAX_SIZE_TEST
__lowercase = cfg.MODEL.DEVICE
__lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = lambda _lowerCamelCase : (x - self.pixel_mean) / self.pixel_std
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
        __lowercase = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
__lowercase = [im.shape[-2:] for im in images]
__lowercase = [
nn.functional.pad(
_lowerCamelCase ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
for size, im in zip(_lowerCamelCase ,_lowerCamelCase )
]
return torch.stack(_lowerCamelCase ), torch.tensor(_lowerCamelCase )
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [images]
if single_image:
assert len(_lowerCamelCase ) == 1
for i in range(len(_lowerCamelCase ) ):
if isinstance(images[i] ,torch.Tensor ):
images.insert(_lowerCamelCase ,images.pop(_lowerCamelCase ).to(self.device ).float() )
elif not isinstance(images[i] ,torch.Tensor ):
images.insert(
_lowerCamelCase ,torch.as_tensor(img_tensorize(images.pop(_lowerCamelCase ) ,input_format=self.input_format ) )
.to(self.device )
.float() ,)
# resize smallest edge
__lowercase = torch.tensor([im.shape[:2] for im in images] )
__lowercase = self.aug(_lowerCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
            __lowercase = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
__lowercase , __lowercase = self.pad(_lowerCamelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__lowercase = torch.true_divide(_lowerCamelCase ,_lowerCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _clip_box( tensor , box_size : Tuple[int, int] ):
assert torch.isfinite(lowerCamelCase_ ).all(), "Box tensor contains infinite or NaN!"
__lowercase , __lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 1].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 2].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 3].clamp_(min=0 , max=lowerCamelCase_ )
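# Illustrative wiring of the pieces above (hedged: the two classes are defined
# under obfuscated names, and `cfg`, the image path, and `detected_boxes` are
# hypothetical):
#     preprocess = Preprocess(cfg)
#     images, sizes, scales_yx = preprocess(["/path/to/image.jpg"])
#     boxes_on_original = _scale_box(detected_boxes, scales_yx)
# _scale_box rescales detector outputs from resized-image coordinates back to
# the original image, and _clip_box clamps them to the image bounds.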
| 217
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def _lowerCAmelCase ( lowerCamelCase_ : Dict ):
__lowercase = list(state_dict.keys() )
for name in state_dict_keys:
__lowercase = state_dict.pop(lowerCamelCase_ )
# emb -> embedding
if name.startswith('''emb.''' ):
__lowercase = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
__lowercase = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
__lowercase = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , lowerCamelCase_ )
# ffn -> feed_forward
__lowercase = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , lowerCamelCase_ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
__lowercase = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
__lowercase = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('''.time_mix_r''' ):
__lowercase = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
__lowercase = '''rwkv.''' + name
__lowercase = weight
return state_dict
def convert_rmkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
__lowercase = 5_0_2_7_7
__lowercase = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
__lowercase = PreTrainedTokenizerFast(tokenizer_file=lowerCamelCase_ )
__lowercase = len(lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
# 2. Build the config
__lowercase = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowercase = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(f"`size` should be one of {possible_sizes}, got {size}." )
__lowercase = RwkvConfig(
vocab_size=lowerCamelCase_ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowerCamelCase_ )
# 3. Download model file then convert state_dict
__lowercase = hf_hub_download(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = convert_state_dict(lowerCamelCase_ )
# 4. Split in shards and save
__lowercase , __lowercase = shard_checkpoint(lowerCamelCase_ )
for shard_file, shard in shards.items():
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
if index is not None:
__lowercase = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
# Save the index as well
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
__lowercase = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + '''\n'''
f.write(lowerCamelCase_ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''' )
__lowercase = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowercase = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
__lowercase = AutoModelForCausalLM.from_pretrained(lowerCamelCase_ )
model.push_to_hub(lowerCamelCase_ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
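# Example invocation (the script, repo and checkpoint names below are
# placeholders, not verified against the Hub):
#     python convert_rwkv_checkpoint_to_hf.py \
#         --repo_id <user>/<rwkv-repo> \
#         --checkpoint_file <checkpoint>.pth \
#         --output_dir ./rwkv-hf \
#         --size 169M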
| 217
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
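# Note on the pattern above: importing this package only builds `_import_structure`;
# the heavy torch/TF submodules load on first attribute access via _LazyModule. A
# minimal standalone sketch of the same idea (illustrative, independent of transformers):
#
#     import importlib
#
#     class LazyAttr:
#         def __init__(self, module_name):
#             self._module_name, self._module = module_name, None
#
#         def __getattr__(self, name):
#             if self._module is None:  # import deferred until first use
#                 self._module = importlib.import_module(self._module_name)
#             return getattr(self._module, name)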
| 150
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n    and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n    and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n    Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int=None , snake_case__ : Optional[int]=1 , snake_case__ : int="binary" , snake_case__ : List[str]=None ):
        score = f1_score(
snake_case__ , snake_case__ , labels=snake_case__ , pos_label=snake_case__ , average=snake_case__ , sample_weight=snake_case__ )
return {"f1": float(snake_case__ ) if score.size == 1 else score}
| 144
| 0
|
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))
def generate_key( message ,key ) -> str:
    """simple docstring"""
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text( message ,key_new ) -> str:
    """simple docstring"""
    cipher_text = """"""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text
def original_text( cipher_text ,key_new ) -> str:
    """simple docstring"""
    or_txt = """"""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt
def main() -> None:
    """simple docstring"""
    message = """THE GERMAN ATTACK"""
    key = """SECRET"""
    key_new = generate_key(message ,key )
    s = cipher_text(message ,key_new )
    print(F'Encrypted Text = {s}' )
    print(F'Original Text = {original_text(s ,key_new )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
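# A quick round-trip check of the three helpers above; `_roundtrip_demo` is an
# illustrative name added here, not part of the original module.
def _roundtrip_demo() -> None:
    message, key = "THE GERMAN ATTACK", "SECRET"
    key_new = generate_key(message, key)
    assert original_text(cipher_text(message, key_new), key_new) == message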
| 125
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort( graph ,vert ,visited ) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph ,neighbour ,visited )
    order.append(vert )
    return order
def find_components( reversed_graph ,vert ,visited ) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph ,neighbour ,visited )
    return component
def strongly_connected_components( graph ) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph ,i ,visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph ,vert ,visited )
            components_list.append(component )
    return components_list
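# Small sanity check (illustrative helper, not in the original module): in
# test_graph_1 the cycle 0 -> 2 -> 1 -> 0 forms one component and 3, 4 are
# singletons, so normalizing the order yields [[0, 1, 2], [3], [4]].
def _scc_demo() -> None:
    components = strongly_connected_components(test_graph_1)
    assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3], [4]]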
| 125
| 1
|
'''simple docstring'''
import math
def proth( number : int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )

    if number < 1:
        msg = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
    value = 0
    try:
        value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
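# For reference, the first Proth numbers produced above are
# 3, 5, 9, 13, 17, 25, 33, 41, 49, 57, 65, ... (N = k * 2**n + 1 with odd k < 2**n),
# so e.g. proth(1) == 3 and proth(6) == 25 (values hand-checked against the definition).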
| 93
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape ,scale=1.0 ,rng=None ,name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self ,parent ,batch_size=7 ,min_seq_length=400 ,max_seq_length=2000 ,spectrogram_length=2048 ,feature_size=128 ,num_audio_channels=1 ,hop_length=512 ,chunk_length=30 ,sampling_rate=44100 ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self ,equal_length=False ,numpify=False ):
'''simple docstring'''
def _flatten(lowerCamelCase__ : List[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE = feature_extractor(
lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self ,num_samples ):
        '''simple docstring'''
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech ,return_tensors="""pt""" ).audio_values

        self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,expected_slice ,atol=1e-4 ) )
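# These tests follow the standard transformers feature-extractor test layout; a
# typical invocation from a transformers checkout (the path is illustrative) is:
#     python -m pytest tests/models/tvlt/test_feature_extraction_tvlt.py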
| 296
| 0
|
def find_minimum_change( denominations , value ):
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""

    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())

        for i in range(0, n):
            denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input("""Enter the change you want to make: """).strip()

    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
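# Worked example (hand-checked; the helper name is illustrative): 987 decomposes
# greedily from the largest denomination down.
def _change_demo() -> None:
    assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
        500, 100, 100, 100, 100, 50, 20, 10, 5, 2
    ]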
| 88
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88
| 1
|
'''simple docstring'''
def bubble_sort( list_data : list , length : int = 0 ) -> list:
    """simple docstring"""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
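# Illustrative calls (hand-checked): the recursion shortens the scanned prefix
# by one element per pass and stops early once a pass makes no swap.
#     bubble_sort([54, 26, 93, 17, 77, 31]) == [17, 26, 31, 54, 77, 93]
#     bubble_sort([]) == []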
| 31
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data : dict ) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost( features : np.ndarray , target : np.ndarray ) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main() -> None:
    """simple docstring"""
    iris = load_iris()
    data , target = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.2_5 )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
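# A possible follow-up (hypothetical, not in the original script): after fitting,
# `xgboost(x_train, y_train).score(x_test, y_test)` would report mean accuracy on
# the held-out 25% split, complementing the normalized confusion matrix above.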
| 31
| 1
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number : int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ) -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution( n : int = 200_0000) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x: x < n , prime_generator()))
if __name__ == "__main__":
print(f'{solution() = }')
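# Reference values: the sum of all primes below 10 is 2 + 3 + 5 + 7 = 17, so
# solution(10) == 17; with the default bound of 200_0000 this is Project Euler
# problem 10, whose well-known answer is 142913828922.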
| 368
|
from __future__ import annotations
def shear_stress( stress : float , tangential_force : float , area : float , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0) != 1:
raise ValueError("""You cannot supply more or less than 2 values""")
elif stress < 0:
raise ValueError("""Stress cannot be negative""")
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""")
elif area < 0:
raise ValueError("""Area cannot be negative""")
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
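# Worked example: exactly one of the three quantities must be zero (the unknown).
# shear_stress(stress=25, tangential_force=100, area=0) solves for the area and
# returns ("area", 4.0), since area = tangential_force / stress = 100 / 25.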
| 266
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = """biogpt"""
    def __init__( self , vocab_size=4_2_3_8_4 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs ):
'''simple docstring'''
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: List[Any] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = hidden_size
__lowerCAmelCase: List[str] = num_hidden_layers
__lowerCAmelCase: Optional[Any] = num_attention_heads
__lowerCAmelCase: List[Any] = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase: int = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = initializer_range
__lowerCAmelCase: List[Any] = layer_norm_eps
__lowerCAmelCase: str = scale_embedding
__lowerCAmelCase: Optional[Any] = use_cache
__lowerCAmelCase: Optional[int] = layerdrop
__lowerCAmelCase: Optional[int] = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
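# Typical usage (illustrative; BioGptConfig/BioGptModel are the public transformers
# names this obfuscated class corresponds to):
#     config = BioGptConfig(num_hidden_layers=6)   # a smaller variant
#     model = BioGptModel(config)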
| 217
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : str = """deformable_detr"""
SCREAMING_SNAKE_CASE_ : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=3_0_0 , max_position_embeddings=1_0_2_4 , encoder_layers=6 , encoder_ffn_dim=1_0_2_4 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1_0_2_4 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=3_0_0 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
__lowerCAmelCase: List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config , dict):
__lowerCAmelCase: int = backbone_config.get("model_type")
__lowerCAmelCase: List[str] = CONFIG_MAPPING[backbone_model_type]
                __lowerCAmelCase: Any = config_class.from_dict(backbone_config)
__lowerCAmelCase: int = use_timm_backbone
__lowerCAmelCase: Any = backbone_config
__lowerCAmelCase: Tuple = num_channels
__lowerCAmelCase: str = num_queries
__lowerCAmelCase: List[str] = max_position_embeddings
__lowerCAmelCase: List[Any] = d_model
__lowerCAmelCase: Union[str, Any] = encoder_ffn_dim
__lowerCAmelCase: Tuple = encoder_layers
__lowerCAmelCase: List[str] = encoder_attention_heads
__lowerCAmelCase: Any = decoder_ffn_dim
__lowerCAmelCase: Union[str, Any] = decoder_layers
__lowerCAmelCase: List[Any] = decoder_attention_heads
__lowerCAmelCase: List[Any] = dropout
__lowerCAmelCase: Optional[Any] = attention_dropout
__lowerCAmelCase: Union[str, Any] = activation_dropout
__lowerCAmelCase: Union[str, Any] = activation_function
__lowerCAmelCase: Dict = init_std
__lowerCAmelCase: int = init_xavier_std
__lowerCAmelCase: str = encoder_layerdrop
__lowerCAmelCase: Union[str, Any] = auxiliary_loss
__lowerCAmelCase: List[Any] = position_embedding_type
__lowerCAmelCase: str = backbone
__lowerCAmelCase: Tuple = use_pretrained_backbone
__lowerCAmelCase: int = dilation
# deformable attributes
__lowerCAmelCase: Union[str, Any] = num_feature_levels
__lowerCAmelCase: Optional[Any] = encoder_n_points
__lowerCAmelCase: Dict = decoder_n_points
__lowerCAmelCase: Optional[Any] = two_stage
__lowerCAmelCase: Tuple = two_stage_num_proposals
__lowerCAmelCase: int = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True.")
# Hungarian matcher
__lowerCAmelCase: str = class_cost
__lowerCAmelCase: List[str] = bbox_cost
__lowerCAmelCase: List[str] = giou_cost
# Loss coefficients
__lowerCAmelCase: Tuple = mask_loss_coefficient
__lowerCAmelCase: int = dice_loss_coefficient
__lowerCAmelCase: Any = bbox_loss_coefficient
__lowerCAmelCase: str = giou_loss_coefficient
__lowerCAmelCase: int = eos_coefficient
__lowerCAmelCase: Tuple = focal_alpha
__lowerCAmelCase: Optional[Any] = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
@property
def lowercase_ ( self : List[Any])-> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase_ ( self : Optional[Any])-> int:
'''simple docstring'''
return self.d_model
def lowercase_ ( self : Union[str, Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Tuple = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__lowerCAmelCase: str = self.backbone_config.to_dict()
__lowerCAmelCase: Tuple = self.__class__.model_type
return output
| 217
| 1
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCamelCase_ ={
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a_ ( _lowercase ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def a_ ( ):
_UpperCamelCase : Dict = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=_lowercase , required=_lowercase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=_lowercase , required=_lowercase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=_lowercase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=_lowercase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=_lowercase , required=_lowercase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=_lowercase , type=_lowercase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=_lowercase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=_lowercase , required=_lowercase , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=_lowercase , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=_lowercase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=_lowercase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=_lowercase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=_lowercase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=_lowercase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=_lowercase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=_lowercase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=_lowercase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=_lowercase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=_lowercase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=_lowercase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=_lowercase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=_lowercase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_lowercase , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=_lowercase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=_lowercase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=_lowercase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=_lowercase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=_lowercase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=_lowercase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=_lowercase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=_lowercase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=_lowercase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=_lowercase , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=_lowercase , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=_lowercase , default=4000 , help='''Checkpoint interval.''' )
_UpperCamelCase : Dict = parser.parse_args()
sanity_checks(_lowercase )
# ARGS #
init_gpu_params(_lowercase )
set_seed(_lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(_lowercase ) , _lowercase , indent=4 )
git_log(args.dump_path )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Dict = MODEL_CLASSES[args.student_type]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_UpperCamelCase : Union[str, Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_UpperCamelCase : str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_UpperCamelCase : Optional[int] = tokenizer.all_special_tokens.index(_lowercase )
_UpperCamelCase : Any = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
_UpperCamelCase : Union[str, Any] = special_tok_ids
_UpperCamelCase : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
_UpperCamelCase : Optional[Any] = pickle.load(_lowercase )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
_UpperCamelCase : List[str] = pickle.load(_lowercase )
_UpperCamelCase : Dict = np.maximum(_lowercase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_UpperCamelCase : Optional[int] = 0.0 # do not predict special tokens
_UpperCamelCase : List[Any] = torch.from_numpy(_lowercase )
else:
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : str = LmSeqsDataset(params=_lowercase , data=_lowercase )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
_UpperCamelCase : Dict = student_config_class.from_pretrained(args.student_config )
_UpperCamelCase : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
_UpperCamelCase : Union[str, Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=_lowercase )
else:
_UpperCamelCase : Tuple = student_model_class(_lowercase )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
_UpperCamelCase : Tuple = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_lowercase )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_lowercase , _lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_lowercase , _lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == student.config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_UpperCamelCase : Union[str, Any] = Distiller(
params=_lowercase , dataset=_lowercase , token_probs=_lowercase , student=_lowercase , teacher=_lowercase )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
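
# A minimal, self-contained sketch of the MLM token-weighting scheme used above: raw
# token counts are raised to the power -mlm_smoothing so rarer tokens are picked for
# masking more often (see XLM / word2vec subsampling). The counts and values below are
# invented for illustration only.
import numpy as np

token_counts = np.array([1_000_000, 10_000, 100, 1])  # hypothetical corpus frequencies
mlm_smoothing = 0.7
token_probs = np.maximum(token_counts, 1) ** -mlm_smoothing
token_probs[0] = 0.0  # never predict special tokens, mirroring the loop above
print(token_probs / token_probs.sum())  # rare tokens get a larger share of the mask budget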
| 128
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _a ( _lowerCAmelCase ):
UpperCamelCase = ['''image_processor''', '''feature_extractor''']
UpperCamelCase = '''TvltImageProcessor'''
UpperCamelCase = '''TvltFeatureExtractor'''
def __init__( self : Union[str, Any], lowerCAmelCase__ : str, lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(image_processor=lowerCAmelCase__, feature_extractor=lowerCAmelCase__ )
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : int = feature_extractor
def __call__( self : List[str], lowerCAmelCase__ : Optional[int]=None, lowerCAmelCase__ : str=None, lowerCAmelCase__ : Dict=None, lowerCAmelCase__ : str=None, lowerCAmelCase__ : Optional[int]=False, lowerCAmelCase__ : str=False, *lowerCAmelCase__ : List[str], **lowerCAmelCase__ : Optional[int], ) -> Dict:
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
_UpperCamelCase : Optional[int] = None
if images is not None:
_UpperCamelCase : Optional[int] = self.image_processor(lowerCAmelCase__, mask_pixel=lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__ )
if images_mixed is not None:
_UpperCamelCase : str = self.image_processor(lowerCAmelCase__, is_mixed=lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__ )
if audio is not None:
_UpperCamelCase : Union[str, Any] = self.feature_extractor(
lowerCAmelCase__, *lowerCAmelCase__, sampling_rate=lowerCAmelCase__, mask_audio=lowerCAmelCase__, **lowerCAmelCase__ )
_UpperCamelCase : str = {}
if audio is not None:
output_dict.update(lowerCAmelCase__ )
if images is not None:
output_dict.update(lowerCAmelCase__ )
if images_mixed_dict is not None:
output_dict.update(lowerCAmelCase__ )
return output_dict
@property
def snake_case ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[str] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
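
# A hedged usage sketch for the processor above: both inputs are optional and the result
# is the union of image and audio features. The checkpoint id, array shapes, and sampling
# rate below are assumptions for illustration, not verified values.
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed checkpoint id
video = [np.random.rand(8, 3, 224, 224)]   # assumed (frames, channels, height, width)
audio = [np.random.rand(10_000)]           # assumed mono waveform
inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")
print(sorted(inputs.keys()))               # union of the two feature extractors' outputs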
| 128
| 1
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
snake_case_ : Dict = get_logger(__name__)
class __a :
__a : List[Any] = "dummy_data"
__a : Optional[int] = "datasets"
__a : Dict = False
def __init__( self : str , __magic_name__ : str , __magic_name__ : str , __magic_name__ : Union[Version, str] , __magic_name__ : Optional[str] = None , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[List[Callable]] = None , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Optional[int] = dataset_name
UpperCAmelCase_ : Union[str, Any] = cache_dir
UpperCAmelCase_ : List[Any] = use_local_dummy_data
UpperCAmelCase_ : Optional[int] = config
# download_callbacks take a single url as input
UpperCAmelCase_ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCAmelCase_ : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCAmelCase_ : List[Any] = str(__magic_name__ )
# to be downloaded
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Tuple = None
@property
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if self._dummy_file is None:
UpperCAmelCase_ : Dict = self.download_dummy_data()
return self._dummy_file
@property
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCAmelCase_ : Union[str, Any] = cached_path(
__magic_name__ , cache_dir=self.cache_dir , extract_compressed_file=__magic_name__ , force_extract=__magic_name__ )
return os.path.join(__magic_name__ , self.dummy_file_name )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
if self._bucket_url is None:
UpperCAmelCase_ : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[Any] , *__magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCAmelCase_ : str = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCAmelCase_ : str = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__magic_name__ , __magic_name__ ):
return self.create_dummy_data_dict(__magic_name__ , __magic_name__ )
elif isinstance(__magic_name__ , (list, tuple) ):
return self.create_dummy_data_list(__magic_name__ , __magic_name__ )
else:
return self.create_dummy_data_single(__magic_name__ , __magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , *__magic_name__ : Any ) -> str:
"""simple docstring"""
return self.download_and_extract(__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
return self.download_and_extract(__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Any , *__magic_name__ : Tuple , **__magic_name__ : Any ) -> Dict:
"""simple docstring"""
return path
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
return {}
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__magic_name__ , __magic_name__ ):
for single_url in single_urls:
download_callback(__magic_name__ )
else:
UpperCAmelCase_ : Optional[int] = single_urls
download_callback(__magic_name__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Any = [os.path.join(__magic_name__ , urllib.parse.quote_plus(Path(__magic_name__ ).name ) ) for x in single_urls]
else:
UpperCAmelCase_ : List[str] = single_urls
UpperCAmelCase_ : List[str] = os.path.join(__magic_name__ , urllib.parse.quote_plus(Path(__magic_name__ ).name ) )
UpperCAmelCase_ : Any = value
# make sure that values are unique
if all(isinstance(__magic_name__ , __magic_name__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCAmelCase_ : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
UpperCAmelCase_ : Any = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , __magic_name__ ) ) for url in data_url )
UpperCAmelCase_ : Optional[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCAmelCase_ : Dict = [data_url[0]] * len(__magic_name__ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__magic_name__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Optional[Any] = os.path.join(__magic_name__ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(__magic_name__ )
return dummy_data_list
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(__magic_name__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : str = os.path.join(__magic_name__ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(__magic_name__ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : str ) -> Any:
"""simple docstring"""
def _iter_archive_members(__magic_name__ : List[Any] ):
# this preserves the order of the members inside the ZIP archive
UpperCAmelCase_ : str = Path(self.dummy_file ).parent
UpperCAmelCase_ : Optional[Any] = path.relative_to(__magic_name__ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCAmelCase_ : str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__magic_name__ )
UpperCAmelCase_ : str = Path(__magic_name__ )
UpperCAmelCase_ : Dict = _iter_archive_members(__magic_name__ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(__magic_name__ ).as_posix(), file_path.open('''rb''' )
def UpperCAmelCase__ ( self : str , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Any = [paths]
for path in paths:
if os.path.isfile(__magic_name__ ):
if os.path.basename(__magic_name__ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__magic_name__ ):
if os.path.basename(__magic_name__ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(__magic_name__ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(__magic_name__ , __magic_name__ )
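
# A small sketch of the shard-deduplication trick used in create_dummy_data_list above:
# URLs that look like sharded records ("...-00001-of-00300") are all mapped to the first
# shard so a single dummy file can stand in for the whole set. The URLs are invented.
import re

urls = [
    "https://example.com/data.txt-00001-of-00300",
    "https://example.com/data.txt-00002-of-00300",
]
is_tf_records = all(re.findall(r"[0-9]{3,}-of-[0-9]{3,}", url) for url in urls)
if is_tf_records:
    urls = [urls[0]] * len(urls)  # every shard resolves to the same dummy file
print(urls)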
| 125
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
snake_case_ : List[str] = logging.getLogger(__name__)
@dataclass
class __a :
__a : Optional[str] = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__a : Optional[str] = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__a : int = field(
default=1_024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "A csv or a json file containing the training data."} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "A csv or a json file containing the validation data."} )
__a : Optional[str] = field(default=lowerCamelCase , metadata={"help": "A csv or a json file containing the test data."} )
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
UpperCAmelCase_ : Dict = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
UpperCAmelCase_ : Union[str, Any] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__a : str = field(
default=lowerCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__a : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCamelCase_ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
UpperCAmelCase_ : List[str] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase_ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ : List[Any] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
UpperCAmelCase_ : Dict = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
UpperCAmelCase_ : Dict = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase_ : Union[str, Any] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
UpperCAmelCase_ : int = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
UpperCAmelCase_ : List[Any] = load_dataset('''csv''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
UpperCAmelCase_ : int = load_dataset('''json''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
UpperCAmelCase_ : Optional[Any] = raw_datasets['''train'''].features['''label'''].names
UpperCAmelCase_ : List[str] = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# load tapex tokenizer
UpperCAmelCase_ : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase_ : Optional[int] = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase_ : Dict = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
UpperCAmelCase_ : Tuple = {'''Refused''': 0, '''Entailed''': 1}
UpperCAmelCase_ : Tuple = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase_ : int = min(data_args.max_seq_length, tokenizer.model_max_length )
def preprocess_tabfact_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Tokenize the texts
def _convert_table_text_to_pandas(SCREAMING_SNAKE_CASE__ : Tuple ):
UpperCAmelCase_ : List[str] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
UpperCAmelCase_ : Any = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0] )
return _table_pd
UpperCAmelCase_ : Optional[Any] = examples['''statement''']
UpperCAmelCase_ : Union[str, Any] = list(map(_convert_table_text_to_pandas, examples['''table_text'''] ) )
UpperCAmelCase_ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, padding=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__, truncation=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
UpperCAmelCase_ : List[str] = raw_datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on dataset''', )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase_ : Any = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase_ : Dict = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase_ : str = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase_ : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
UpperCAmelCase_ : Dict = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
UpperCAmelCase_ : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ), 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE__ : EvalPrediction ):
UpperCAmelCase_ : Any = p.predictions[0] if isinstance(p.predictions, SCREAMING_SNAKE_CASE__ ) else p.predictions
UpperCAmelCase_ : Optional[int] = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase_ : Optional[Any] = default_data_collator
elif training_args.fp16:
UpperCAmelCase_ : str = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__, pad_to_multiple_of=8 )
else:
UpperCAmelCase_ : List[Any] = None
# Initialize our Trainer
UpperCAmelCase_ : int = Trainer(
model=SCREAMING_SNAKE_CASE__, args=SCREAMING_SNAKE_CASE__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__, data_collator=SCREAMING_SNAKE_CASE__, )
# Training
if training_args.do_train:
UpperCAmelCase_ : Dict = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ : Optional[int] = last_checkpoint
UpperCAmelCase_ : Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = train_result.metrics
UpperCAmelCase_ : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
UpperCAmelCase_ : List[Any] = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''', SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''train''', SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Tuple = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics('''eval''', SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''eval''', SCREAMING_SNAKE_CASE__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` column because it contains -1 and Trainer won't like that.
UpperCAmelCase_ : Optional[int] = predict_dataset.remove_columns('''label''' )
UpperCAmelCase_ : Union[str, Any] = trainer.predict(SCREAMING_SNAKE_CASE__, metric_key_prefix='''predict''' ).predictions
UpperCAmelCase_ : Any = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 )
UpperCAmelCase_ : int = os.path.join(training_args.output_dir, '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Dict = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
UpperCAmelCase_ : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
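
# A standalone sketch of the table encoding assumed by preprocess_tabfact_function above:
# rows are newline-separated and cells are '#'-separated, with the first row as the header.
# The table text is invented for illustration.
import pandas as pd

table_text = "year#city\n2020#Tokyo\n2024#Paris\n"
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
print(table)  # two rows with columns 'year' and 'city'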
| 125
| 1
|
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class DatasetScriptsQualityTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # flag `open(...)` calls that specify neither an encoding nor a binary mode
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
            return match

    def _no_print_statements(self, filepath: str):
        # flag bare print(...) calls; comments and docstrings containing "print(" are ignored
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
            return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 143
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__(self : Tuple , __UpperCAmelCase : str , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 1_3
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = 9_9
UpperCAmelCase__ = 3_2
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 3_7
UpperCAmelCase__ = "gelu"
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 5_1_2
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = None
def lowercase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModel(config=__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ (self : str , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertForMaskedLM(config=__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ (self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertForQuestionAnswering(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ (self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDistilBertForSequenceClassification(__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Any ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFDistilBertForMultipleChoice(__UpperCAmelCase )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDistilBertForTokenClassification(__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ (self : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = config_and_inputs
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCAmelCase : Optional[int] = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : str = False
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , dim=3_7 )
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ (self : int ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCAmelCase )
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCAmelCase )
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCAmelCase )
def lowercase_ (self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCAmelCase )
@slow
def lowercase_ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
UpperCAmelCase__ = TFDistilBertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
class A ( unittest.TestCase ):
@slow
def lowercase_ (self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__UpperCAmelCase )[0]
UpperCAmelCase__ = [1, 6, 7_6_8]
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCAmelCase__ = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
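
# A minimal sketch of the tolerance check the slow integration test above relies on,
# assuming TensorFlow is installed. The tensors here are stand-ins, not model outputs.
import tensorflow as tf

expected = tf.constant([[0.1926, -0.1373, 0.4120]])
actual = expected + 5e-5                               # within the 1e-4 tolerance
tf.debugging.assert_near(actual, expected, atol=1e-4)  # passes silently; raises outside atol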
| 143
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
sample: torch.FloatTensor
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[Any]=(64,) , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : List[str]=True , ) -> str:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = torch.nn.Conv2d(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
# down
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
__magic_name__ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = 2 * out_channels if double_z else out_channels
__magic_name__ = nn.Conv2d(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
__magic_name__ = x
__magic_name__ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : int ):
def custom_forward(*UpperCamelCase__ : str ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
__magic_name__ = down_block(UpperCamelCase__ )
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ )
# post-process
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
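
# A standalone sketch of the gradient-checkpointing pattern used in Encoder.forward above:
# each block is wrapped in a closure so torch.utils.checkpoint can re-run it during the
# backward pass instead of storing its activations. Shapes are arbitrary; assumes a
# recent PyTorch (the `use_reentrant` keyword needs >= 1.11).
import torch
import torch.utils.checkpoint

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

block = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.SiLU())
x = torch.randn(2, 8, requires_grad=True)
out = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
out.sum().backward()
print(x.grad.shape)  # gradients flow through the checkpointed block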
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = nn.Conv2d(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == """spatial""" else None
# mid
__magic_name__ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
__magic_name__ = list(reversed(UpperCamelCase__ ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = nn.Conv2d(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
"""simple docstring"""
__magic_name__ = z
__magic_name__ = self.conv_in(UpperCamelCase__ )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : int ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
else:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
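# Usage sketch for the quantizer above (illustrative only -- the constructor
# arguments n_e, vq_embed_dim and beta are assumptions inferred from the
# attributes read in forward(), since the __init__ head is defined earlier):
#
#   vq = VectorQuantizer(n_e=256, vq_embed_dim=4, beta=0.25)
#   z = torch.randn(2, 4, 8, 8)                 # (batch, channel, height, width)
#   z_q, loss, (_, _, indices) = vq(z)
#   assert z_q.shape == z.shape                 # quantized latents, gradients preserved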
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure the sample lands on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
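# Sketch of typical use, assuming `parameters` packs mean and logvar along
# dim=1 as in a VAE encoder output (the names below are illustrative):
#
#   params = torch.randn(1, 8, 4, 4)            # 2 * latent_channels on dim=1
#   dist = DiagonalGaussianDistribution(params)
#   z = dist.sample()                           # reparameterized draw: mean + std * eps
#   kl = dist.kl()                              # KL(q || N(0, I)) per batch element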
| 88
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 88
| 1
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot is a deque; new items are prepended to the chain
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
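# Minimal sketch of the chaining behaviour, assuming the HashTable base class
# accepts (size_table, charge_factor) and initializes the `values` slots to
# None (an assumption inferred from how they are read above):
#
#   table = HashTableWithLinkedList(3, charge_factor=2)
#   table._set_value(0, "a")
#   table._set_value(0, "b")                    # appendleft -> deque(["b", "a"])
#   # _collision_resolution only probes for a new key once every slot already
#   # holds `charge_factor` items, i.e. the table is fully charged.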
| 304
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
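# Usage sketch: the defaults above mirror albert-xxlarge (hidden_size=4096,
# 64 heads, 12 layers in a single shared group), so smaller variants override
# them, e.g.:
#
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#   # ALBERT shares parameters across layers: num_hidden_layers sets depth,
#   # num_hidden_groups sets how many distinct parameter sets exist.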
| 304
| 1
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
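# Note: print_reverse recurses once per node, so very long lists can hit
# Python's default recursion limit (~1000 frames).  An equivalent iterative
# sketch that collects the values first:
#
#   def print_reverse_iterative(head_node):
#       values = []
#       while head_node:
#           values.append(head_node.data)
#           head_node = head_node.next
#       for value in reversed(values):
#           print(value)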
| 335
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, _lowerCamelCase )
self.assertListEqual(encoding.boxes, _lowerCamelCase )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 266
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
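        # Worked example with the defaults above (patch_size=2, both strides=2,
        # num_mel_bins=16, max_length=24):
        #   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
        #   time_out_dimension      = (24 - 2) // 2 + 1 = 12
        #   seq_length              = 8 * 12 + 2 = 98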
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    # a sample speech waveform hosted on the Hub for the slow integration test
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 230
|
"""simple docstring"""
SCREAMING_SNAKE_CASE = "Alexander Joslin"
import operator as op
from .stack import Stack
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
A__ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
A__ = Stack()
A__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase_ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase_ )
elif i == ")":
# RULE 4
A__ = operator_stack.peek()
operator_stack.pop()
A__ = operand_stack.peek()
operand_stack.pop()
A__ = operand_stack.peek()
operand_stack.pop()
A__ = operators[opr](lowercase_ , lowercase_ )
operand_stack.push(lowercase_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
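# Worked trace for the example equation "(5 + ((4 * 2) * (2 + 3)))":
#   first ")"  -> pops *, 2, 4 -> pushes 8
#   second ")" -> pops +, 3, 2 -> pushes 5
#   third ")"  -> pops *, 5, 8 -> pushes 40
#   fourth ")" -> pops +, 40, 5 -> pushes 45   # RULE 5 returns 45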
| 230
| 1
|
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph, source_vertex):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 128
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
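# Worked example: with the default scale_factor=8 a 768x768 request maps to
# 768 // 8**2 = 12 latent cells per side (no remainder), returned as
# 12 * 8 = 96 -- the latent resolution the UNet runs at before the movq
# decoder upscales by 8 back to 768.  A non-multiple such as 700 rounds up:
# 700 // 64 = 10 with remainder, so 11 * 8 = 88 is returned.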
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 128
| 1
|
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_config_register_priority(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 356
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
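# Worked example: for the 7B model, n = dim = 4096 with ffn_dim_multiplier = 1
# and multiple_of = 256 gives int(8 * 4096 / 3) = 10922, rounded up to the
# next multiple of 256: 256 * ((10922 + 255) // 256) = 256 * 43 = 11008,
# matching INTERMEDIATE_SIZE_MAP["7B"] above.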
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.

            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))

    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
| 234
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
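# Sketch of how the tokenizer under test is typically used downstream
# (model id as in the tests above; keyword arguments are illustrative):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer(["who got the first nobel prize in physics"], return_tensors="pt")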
| 143
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
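# Quick manual check of the API exercised above (hands and result taken from
# the first row of the TEST_COMPARE table):
#
#   >>> PokerHand("2H 3H 4H 5H 6H").compare_with(PokerHand("KS AS TS QS JS"))
#   'Loss'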
| 143
| 1
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every vector in value_array, find the closest vector in dataset."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
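# Minimal usage sketch (values chosen for illustration):
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 1.0]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 1.0]]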
| 66
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class UNetaDModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = "sample"
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = "sample"
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = "sample"
    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
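# End-to-end sketch of the pattern these tests exercise (config values mirror
# the small test configs above; everything here is illustrative):
#
#   model = UNetaDModel(
#       block_out_channels=(32, 64),
#       down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#       up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#       in_channels=3, out_channels=3, layers_per_block=2, sample_size=32,
#   )
#   sample = model(torch.randn(1, 3, 32, 32), timestep=torch.tensor([10])).sample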
| 66
| 1
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight")
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias")
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}", )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
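# Construction sketch for the layer above (hyper-parameters are illustrative;
# they follow the common Transformer-XL WikiText-103 setup, not this file):
#
#   softmax = TFAdaptiveSoftmaxMask(
#       vocab_size=267735, d_embed=1024, d_proj=1024,
#       cutoffs=[20000, 40000, 200000], div_val=4,
#   )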
| 304
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into its individual words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
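# For example (verifiable against the regex above):
#
#   camel_case_split("TFBertModel")  # -> ["TF", "Bert", "Model"]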
def get_frameworks_table() -> pd.DataFrame:
    """Generate a dataframe listing every model type and its framework support."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table: dict) -> dict:
    """Update the table mapping each model class to its pipeline tag and auto class."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token: str, commit_sha: str):
    """Regenerate the metadata dataset and push it to the Hub."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata', folder_path=tmp_dir, repo_type='dataset', token=token, commit_message=commit_message, )
def check_pipeline_tags():
    """Check every supported pipeline task appears in PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            f"`utils/update_metadata.py`: {msg}. Please add them!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
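# Illustrative invocations of this script (the token value is a placeholder):
#
#   python utils/update_metadata.py --token hf_xxx --commit_sha <sha>
#   python utils/update_metadata.py --check-only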
| 304
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
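# Usage sketch (values chosen for illustration): with a 35 mH inductor at
# 1 kHz, the inductive reactance is 2 * pi * 1000 * 0.035 ≈ 219.91 ohms:
#
#   ind_reactance(inductance=35e-3, frequency=1e3, reactance=0)
#   # -> {'reactance': 219.91...}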
| 371
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/nllb-moe-54b''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
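# Construction sketch (tiny illustrative values, not the released 54B model):
#
#   config = NllbMoeConfig(num_experts=4, expert_capacity=8, encoder_layers=2, decoder_layers=2)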
| 306
| 0
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ['''pixel_values''']
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['''shortest_edge'''])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'''height''': output_size[0], '''width''': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
        return resize(
            image, size=(size_dict['''height'''], size_dict['''width''']), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
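# Usage sketch (the image variable is a placeholder for any PIL image; relies
# on BaseImageProcessor routing __call__ to preprocess):
#
#   processor = LevitImageProcessor()
#   inputs = processor(images=image, return_tensors="pt")  # -> {"pixel_values": ...}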
| 230
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''')
        self.jieba = jieba
        self.translator = str.maketrans(''' \n''', '''\u2582\u2583''')
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''', '''"''').replace('''\'\'''', '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''', outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(''' ''', '''''').replace('''\u2582''', ''' ''').replace('''\u2583''', '''\n''')
        return text
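# Usage sketch (model id as referenced in PRETRAINED_VOCAB_FILES_MAP above;
# encoding input is illustrative):
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("Hello world")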
| 230
| 1
|
def solution(limit: int = 50000000) -> int:
    """Count the numbers below `limit` expressible as p**2 + q**3 + r**4 with p, q, r prime."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'{solution() = }')
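# Sanity check grounded in the Project Euler 87 statement, which notes that
# exactly four such numbers exist below fifty (28, 33, 47 and 49):
#
#   solution(50)  # -> 4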
| 347
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
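# Hedged illustration (added, not part of transformers): a stripped-down module
# type showing the mechanism `_LazyModule` relies on - the real submodule import
# only happens inside __getattr__, on first attribute access, so importing the
# package stays cheap and optional backends are only touched when needed.
# `_TinyLazyModule` and its argument names are hypothetical.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, name):
        # import the owning submodule lazily, then fetch the requested symbol
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)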
| 347
| 1
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True
    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index]
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
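# Hedged cross-check (added, not part of the original module): a small BFS-based
# Edmonds-Karp implementation to verify the push-relabel result. For the sample
# graph above, with source 0 and sink 3, the only augmenting path is
# 0 -> 1 -> 2 -> 3, so the maximum flow is min(7, 6, 8) = 6; e.g.
# _edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6.
def _edmonds_karp(capacity, source, sink):
    from collections import deque
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return max_flow
        # find the bottleneck capacity along the path, then augment
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck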
| 49
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """
    Class to crawl public information from an Instagram user page.
    """
    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"
    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os
    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
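# Hedged sketch (added): the heart of extract_user_profile is plain string
# slicing plus json.loads. A self-contained toy version using a fabricated
# script body instead of a live page (the slice to -1 drops the trailing ";"):
def _demo_extract_profile_json() -> dict:
    contents = 'window._sharedData = {"config": {}, "entry_data": {"ProfilePage": [{}]}};'
    payload = contents[contents.find('{"config"') : -1]
    return json.loads(payload)["entry_data"]["ProfilePage"][0]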
| 234
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
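# Hedged illustration (added, not part of the tokenizer): HerBERT uses the
# BERT-style layout, with <s> (cls) in front and </s> (sep) after each segment.
# The list arithmetic above can be checked with hypothetical ids cls=0, sep=2:
def _demo_herbert_layout():
    cls, sep = [0], [2]
    token_ids_0, token_ids_1 = [11, 12], [21, 22]
    pair = cls + token_ids_0 + sep + token_ids_1 + sep
    token_type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    special_tokens_mask = [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
    assert len(pair) == len(token_type_ids) == len(special_tokens_mask) == 7
    return pair, token_type_ids, special_tokens_mask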
| 52
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FunnelConfig(vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
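# Hedged note (added): the hidden-state count the common tests check follows
# directly from the tester defaults above - sum(block_sizes) encoder layers,
# plus num_decoder_layers for the full model, plus 2 extra reported states
# (the input embeddings and the upsampled encoder output fed to the decoder).
def _expected_num_hidden_layers(block_sizes=(1, 1, 2), num_decoder_layers=1, base=False):
    num_hidden_layers = sum(block_sizes) + (0 if base else num_decoder_layers)
    return num_hidden_layers if base else num_hidden_layers + 2
# With the defaults this gives 4 + 1 + 2 = 7 for the full model and 4 for the base model.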
| 52
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
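# Hedged sketch (added, hypothetical helper): one way to inspect the CSV written
# by save_results_as_csv locally. It assumes the TrainingJobAnalytics export has
# its usual (timestamp, metric_name, value) columns and that pandas is installed.
def _summarize_metrics(path):
    import pandas as pd
    df = pd.read_csv(path)
    # aggregate each reported metric over the whole training run
    return df.groupby("metric_name")["value"].agg(["min", "max", "mean"])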
| 66
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 66
| 1
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
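# Hedged usage check (added): for small bounds the sieve should reproduce the
# well-known prime list.
def _check_prime_sieve():
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert prime_sieve(2) == [2]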
| 359
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences,
        )
| 3
| 0
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 306
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
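# Hedged usage sketch (added): how a processor like this is typically driven;
# the checkpoint name is illustrative and downloading it requires network access.
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values from
#   #    the image processor, matching the __call__ branches above.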
| 356
|
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """
    Bead sort only works for sequences of non-negative integers.
    https://en.wikipedia.org/wiki/Bead_sort
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
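    # Hedged randomized check (added): bead sort should agree with sorted() on
    # any list of non-negative integers.
    import random
    for _ in range(100):
        data = [random.randint(0, 20) for _ in range(random.randint(0, 10))]
        assert bead_sort(list(data)) == sorted(data)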
| 107
| 0
|
"""simple docstring"""
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        # pre-fill the cache with every token but the last, then decode the final
        # token alone; the cached pass must agree with a plain full forward pass
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        # pad the attention mask with zeros up to the cache length so cached
        # positions beyond the real sequence stay masked
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                random_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                # zero out a random prefix of every attention mask so the
                # equivalence check also covers padded inputs
                for batch_idx, start_index in enumerate(random_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                # copy the Flax parameters into the freshly initialised PyTorch
                # model so both frameworks start from identical weights
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                random_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(random_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 347
| 1
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """
    First method: ordinary-least-squares linear regression over
    (date, match count) -> user count.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # normal equation: beta = (X^T X)^-1 X^T y
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """
    Second method: seasonal ARIMA with the match count as an exogenous variable.
    """
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """
    Third method: support-vector regression with an RBF kernel.
    """
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """
    Returns a low limit (Q1 minus a fraction of the IQR); values below
    it can be treated as outliers.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """
    Compares the forecast votes with the actual result: a vote counts as safe
    only if it does not exceed the actual value and lies within 0.1 of it.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
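
# Each model above casts one vote on the day's (normalised) user count; the
# checker accepts the data only when a majority of votes agree with it.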
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (tst_user holds a single value)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 161
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
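
# jieba performs the coarse Chinese word segmentation; each segment is then
# split further by the WordPiece tokenizer defined below.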
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""openbmb/cpm-ant-10b""": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
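
# For example, a vocab file containing the lines "foo" and "bar" loads as
# OrderedDict([("foo", 0), ("bar", 1)]).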
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        # greedy longest-match-first: take the longest vocabulary entry that
        # starts at `start`; emit the unk token for an unmatched character
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # remap the special space/newline tokens to literal characters
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then WordPiece within each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence markers."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # restore the original special tokens before writing the file
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 161
| 1
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        # dummy encoder states and mask for exercising the model as a decoder
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 52
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between successive sequence lengths, so batched inputs increase in size
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # float64 inputs must come out of `pad` as float32
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 52
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
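
# The model classes below are only added to the lazy import structure when the
# corresponding backend (PyTorch or TensorFlow) is actually installed.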
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 196
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to `target`, using the two-pointer technique.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 17)
    [0, 3]
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            # the sum is too small: move the left pointer right to increase it
            i = i + 1
        else:
            # the sum is too large: move the right pointer left to decrease it
            j = j - 1

    return []
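

# Each pointer moves at most len(nums) times, so the search runs in O(n) time
# with O(1) extra space -- but it requires `nums` to be sorted ascending.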
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 196
| 1
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's C++ log verbosity
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 54
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {'''BertModelTest''': '''BertModelTester'''}
        EXPECTED_BLIP_MAPPING = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 3
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l à</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])

        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(F'{token} {vocab_tokens[token]}\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'Tôi là VinAI Research'
        output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'Tôi là VinAI Research'
        bpe_tokens = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 236
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
@property
    def num_layers(self) -> int:
        return self._config.n_layer
@property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
@property
    def default_onnx_opset(self) -> int:
        return 13
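

# Hypothetical usage sketch (illustrative only): pair the model config with
# its ONNX description and inspect the dynamic axes of each input.
#   config = GPTJConfig()
#   onnx_config = GPTJOnnxConfig(config, use_past=True)
#   dict(onnx_config.inputs)  # maps each input name to its dynamic-axis labels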
| 236
| 1
|
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity: the Jaro similarity of the two strings plus a
    bonus for a common prefix of up to four characters.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            # a character only counts as matched if it occurs within `limit`
            # positions of the same index in the other string
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
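

# Worked example: for "martha" vs "marhta", all six characters match with one
# transposition (t/h), and the strings share a three-character prefix, so the
# Winkler bonus lifts the plain Jaro score of ~0.944 to ~0.961.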
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 243
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class for `openai-gpt` style models."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
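
# Minimal usage sketch: the attribute_map above lets the canonical attribute
# names resolve to the GPT-specific ones.
#   config = OpenAIGPTConfig(n_layer=6)
#   assert config.num_hidden_layers == 6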
| 107
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__UpperCAmelCase = input('''Enter image url: ''').strip()
print(f"""Downloading image from {url} ...""")
__UpperCAmelCase = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
__UpperCAmelCase = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
__UpperCAmelCase = requests.get(image_url).content
__UpperCAmelCase = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
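
# Note: the ":" characters in the timestamp are not valid in Windows file
# names; a portable variant could use f"{datetime.now():%Y-%m-%d_%H.%M.%S}.jpg".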
| 42
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of image embeddings so they can be
    scaled into the model's expected range and back.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
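

# Hypothetical usage sketch (assuming the class name above): scale image
# embeddings into the normalised space and back.
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   scaled = normalizer.scale(image_embeds)
#   restored = normalizer.unscale(scaled)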
| 42
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2']
        )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
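# How this script is meant to be launched -- a sketch, assuming `accelerate config`
# has been run and the file is saved as local_sgd.py (flags match the parser above):
#
#   accelerate launch local_sgd.py --mixed_precision fp16 \
#       --gradient_accumulation_steps 2 --local_sgd_steps 8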
| 161
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
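# Effect of the lazy structure above (a sketch of the intent, not extra code in this file):
# importing the package itself stays cheap, and e.g.
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderModel
# only pulls in the torch-backed submodule at that first attribute access.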
| 161
| 1
|
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1_024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2_048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12_544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a `Mask2FormerConfig` from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
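# Construction sketch: with no `backbone_config`, the default Swin backbone is built.
#
#   config = Mask2FormerConfig()
#   config.backbone_config.model_type  # -> "swin"
#   config.to_dict()["model_type"]     # -> "mask2former"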
| 129
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map the resnet block parameters from the consistency-models layout to diffusers.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Split the fused qkv projection and squeeze the trailing conv dims into linear weights.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
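# Example invocation (a sketch; the script filename and checkpoint path are placeholders,
# flag names match the parser above):
#
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt --dump_path ./dumped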
| 129
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
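# Usage sketch: the dataset supplies its own padding collator (`params` is assumed to
# carry `special_tok_ids`, `mlm`, `max_model_input_size` and `is_master`):
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#   token_ids, lengths = next(iter(loader))  # padded (bs, max_len) tensor + lengths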
| 196
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        """Assign labels to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
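# Hedged usage sketch (the model id is illustrative; any CLIP-style checkpoint works):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]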
| 196
| 1
|
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    """MAUVE metric wrapper around the official implementation."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/krishnap25/mauve", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/krishnap25/mauve"], reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ], )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=5_0_0, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_0_2_4, divergence_curve_discretization_size=2_5, mauve_scaling_factor=5, verbose=True, seed=2_5, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 345
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler], ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM can get away with far fewer denoising steps than DDPM.
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True, ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)['sample']
            else:
                model_output = self.unet(images, t)['sample']

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )['prev_sample']
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )['prev_sample']

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['sample']

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype('uint8')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode='RGB').convert('L') for _ in images))

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reverses the deterministic DDIM step to map images back to noise.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
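# Usage sketch (the model id is illustrative -- any checkpoint trained for this pipeline works):
#
#   pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image, audio = output.images[0], output.audios[0]
#   sample_rate = pipe.mel.get_sample_rate()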
| 345
| 1
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
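# Worked trace (sketch): for [8, 3, 2, 7, 4], _min=2 and _max=8 give holes_range=7;
# each value lands in hole (value - _min) with a repeat count, and reading the holes
# back in order yields [2, 3, 4, 7, 8].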
| 236
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, vocab_size: int = 25_00_02, hidden_size: int = 7_68, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 30_72, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 5_14, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-0_5, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
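# Sketch: the defaults above mirror the base checkpoint; any field can be overridden
# at construction time, e.g. a shallower model for experiments:
#
#   config = ErnieMConfig(num_hidden_layers=6)
#   config.attribute_map["dropout"]  # -> "classifier_dropout"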
| 236
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 148
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
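# Example invocation (a sketch; the run id and token are placeholders and the script
# filename is illustrative -- flags match the parser above):
#
#   python extract_warnings.py --workflow_run_id 1234567890 \
#       --output_dir ./artifacts --token <token-with-actions:read>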
| 148
| 1
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
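# Usage sketch: wrap any long-running console output so the cursor stays hidden and
# is guaranteed to be restored, even on exceptions:
#
#   with hide():
#       run_long_task_with_progress_output()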
| 42
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')

    ciphertext = [int(number) for number in data.strip().split(',')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    '''simple docstring'''
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    """simple docstring"""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """simple docstring"""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    """simple docstring"""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
def binary_recursive(decimal: int) -> str:
    '''simple docstring'''
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    '''simple docstring'''
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
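
    # Example (added for illustration): 11 -> (5, 1) -> (2, 1) -> (1, 0), so the
    # remainders unwind to "1011" and the sign prefix is preserved.
    print(main("-11"))  # -0b1011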
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
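

# Hedged usage sketch (added for illustration): through the high-level
# `pipeline` factory this class is reachable under the
# "zero-shot-audio-classification" task; the checkpoint below is one example
# and may differ in practice.
#
#     from transformers import pipeline
#     classifier = pipeline(
#         "zero-shot-audio-classification", model="laion/clap-htsat-unfused"
#     )
#     classifier("audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])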
"""simple docstring"""
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
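

# Hedged usage sketch (added for illustration): a few reverse-SDE steps with a
# dummy zero "score"; a real pipeline would supply a trained score network.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample, sample_mean = scheduler.step_pred(score, sample, t)
    print(sample_mean.shape)  # torch.Size([1, 3, 8, 8])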
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
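

# A minimal sketch of the same lazy-import pattern (added for illustration,
# separate from the MaskFormer module itself): replacing the module object in
# `sys.modules` defers the heavy imports until an attribute is accessed.
#
#     import sys
#     from transformers.utils import _LazyModule
#     sys.modules[__name__] = _LazyModule(
#         __name__, globals()["__file__"], {"my_submodule": ["MyClass"]}
#     )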
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    '''simple docstring'''

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/krishnap25/mauve""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/krishnap25/mauve"""] ,reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] ,)
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
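
        # Why ratio bounds instead of an absolute tolerance (illustration):
        # with entries near 1e8, any absolute tolerance small enough for the
        # ~1e0 entries always fails, while the relative check treats both
        # scales uniformly, e.g. for a, b = 1.0e8, 1.0e8 + 50.0:
        #     abs(a - b) < 1e-3            -> False despite ~5e-7 relative error
        #     1 - 1e-3 < a / b < 1 + 1e-3  -> True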
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=33 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : List[Any]=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : List[Any]=5_12 , __lowerCAmelCase : Dict=16 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[str]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Tuple=None , ) -> int:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def a_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self : Optional[int] ) -> str:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
A__ = EsmModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
A__ = model(__lowerCAmelCase )
A__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a_ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ) -> str:
"""simple docstring"""
A__ = EsmForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
A__ = self.num_labels
A__ = EsmForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp(self):
        """simple docstring"""
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def a_ ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Optional[int] ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def a_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = EsmModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def a_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=__lowerCAmelCase )
A__ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
A__ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
A__ = create_position_ids_from_input_ids(__lowerCAmelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCAmelCase , __lowerCAmelCase ) ) )
def a_ ( self : List[Any] ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()[0]
A__ = EsmEmbeddings(config=__lowerCAmelCase )
A__ = torch.empty(2 , 4 , 30 )
A__ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
A__ = torch.as_tensor([expected_single_positions, expected_single_positions] )
A__ = embeddings.create_position_ids_from_inputs_embeds(__lowerCAmelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCAmelCase , __lowerCAmelCase ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
'''simple docstring'''
@slow
def a_ ( self : int ) -> Optional[int]:
"""simple docstring"""
with torch.no_grad():
A__ = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
A__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ = model(__lowerCAmelCase )[0]
A__ = 33
A__ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
A__ = torch.tensor(
[[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def a_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
A__ = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
A__ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A__ = model(__lowerCAmelCase )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
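

# Position-id convention illustrated (added for clarification): non-padding
# tokens are numbered consecutively from `padding_idx + 1`, while padding
# positions keep `padding_idx` itself, e.g. with padding_idx = 1:
#
#     input_ids    = [12, 31, 13,  1]
#     position_ids = [ 2,  3,  4,  1]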
"""simple docstring"""
from collections import Counter
from timeit import timeit
def UpperCamelCase__ ( lowercase__ : str = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def UpperCamelCase__ ( lowercase__ : str = "" ):
if len(lowercase__ ) == 0:
return True
snake_case : Any = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
snake_case : dict[str, int] = {}
for character in lower_case_input_str:
snake_case : Dict = character_freq_dict.get(lowercase__ , 0 ) + 1
snake_case : List[str] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def UpperCamelCase__ ( lowercase__ : str = "" ):
print("\nFor string = " , lowercase__ , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(lowercase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(lowercase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
__A = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
__A = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
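# Minimal sanity check, added for illustration (the sample graph below is
# hypothetical and not part of the original script). Uncommented, it prints one
# valid MST for a 4-vertex weighted graph:
#
#     from collections import defaultdict
#     graph = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3), (2, 3, 1)]:
#         graph[u].append([v, w])
#         graph[v].append([u, w])
#     print(prisms_algorithm(graph))  # -> [(0, 1), (1, 2), (2, 3)]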
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
    def test_tokenizer_integration(self):
# Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
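# To run this test module on its own (illustrative command; assumes the file is
# saved under the usual transformers test layout):
#
#     python -m pytest tests/models/speecht5/test_tokenization_speecht5.py -k "full_tokenizer"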
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
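# Example invocation (illustrative; the csv path is a placeholder), using the
# flags registered above:
#
#     transformers-cli train \
#         --train_data ./data/train.csv \
#         --task text_classification \
#         --model bert-base-uncased \
#         --output ./trained_model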
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
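# Example (illustrative): the environment command registered above is invoked as
#
#     diffusers-cli env
#
# which prints platform and library version info for bug reports.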
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
lowerCamelCase_ : List[Any] =torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase_ : Dict =torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase_ : Tuple =torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase_ : Dict =torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase_ : List[Any] =torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path)
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
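# Example invocation (illustrative; the script filename is whatever this file is
# saved as, and the output path is a placeholder):
#
#     python convert_upernet_convnext_to_pytorch.py \
#         --model_name upernet-convnext-tiny \
#         --pytorch_dump_folder_path ./upernet-convnext-tiny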
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix=None):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
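# Sketch of intended use (hypothetical local vocab path; in the real library the
# trie built in __init__ splits raw protein strings into per-residue tokens):
#
#     tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#     ids = tokenizer("MKTAYIAKQR")["input_ids"]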
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
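# To run only the fast tests above (illustrative; the path depends on where this
# module is saved in the diffusers test tree):
#
#     pytest -k "KandinskyV22InpaintPipelineFastTests" path/to/this_test_file.py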
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
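# Example invocation (all paths are placeholders):
#
#     python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./lxmert_tf_ckpt \
#         --config_file ./lxmert_config.json \
#         --pytorch_dump_path ./lxmert_pytorch.bin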
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)
    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)
    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False
        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)
            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)
            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
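# Added illustration (not part of the original tests): a minimal, hedged sketch of
# the register -> save -> reload round trip the tests above exercise. The
# `AutoConfig.register` / `AutoTokenizer.register` calls are real `transformers`
# APIs; the tiny subclasses are assumptions invented for the demo.
def _demo_register_roundtrip():
    from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig

    class DemoConfig(PretrainedConfig):
        model_type = "demo-custom"  # hypothetical model type

    class DemoTokenizer(BertTokenizer):
        pass

    AutoConfig.register("demo-custom", DemoConfig)
    AutoTokenizer.register(DemoConfig, slow_tokenizer_class=DemoTokenizer)
    tokenizer = DemoTokenizer.from_pretrained("bert-base-cased")
    with tempfile.TemporaryDirectory() as tmp_dir:
        tokenizer.save_pretrained(tmp_dir)  # writes tokenizer_config.json with the class name
        return AutoTokenizer.from_pretrained(tmp_dir)  # resolves back to DemoTokenizer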
| 314
| 0
|
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
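# Added note (illustrative, not in the original): sorting costs O(n log n), but
# only the largest side matters, so an equivalent O(n) variant is possible:
def check_polygon_linear(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    largest = max(nums)
    return largest < sum(nums) - largest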
| 371
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase : Optional[int] = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowercase : Dict = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
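# Added usage sketch (illustrative, not in the original): the converter can also
# be driven programmatically; all paths below are hypothetical placeholders.
#
# convert_xlnet_checkpoint_to_pytorch(
#     tf_checkpoint_path="/path/to/xlnet/model.ckpt",
#     bert_config_file="/path/to/xlnet_config.json",
#     pytorch_dump_folder_path="/path/to/output",
#     finetuning_task="sst-2",  # a GLUE_TASKS_NUM_LABELS key, or a task containing "squad"
# )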
| 171
| 0
|
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
UpperCamelCase__ = int(input("""Enter the base: """).strip())
UpperCamelCase__ = int(input("""Enter the exponent: """).strip())
UpperCamelCase__ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
UpperCamelCase__ = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
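# Added note (illustrative, not in the original): the recursion above is linear
# in the exponent. Exponentiation by squaring gives a logarithmic-time variant
# under the same non-negative-exponent assumption:
def power_iterative(base: int, exponent: int) -> int:
    result = 1
    while exponent:
        if exponent & 1:  # fold in the current square when the low bit is set
            result *= base
        base *= base  # square the base each round
        exponent >>= 1
    return result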
| 92
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
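# Added note (illustrative, not in the original): with the lazy structure above,
# the heavy sentencepiece import only happens on first attribute access, e.g.:
#
# from transformers import GPTSw3Tokenizer  # resolved lazily through _LazyModule
# tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")  # checkpoint id is an assumption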
| 276
| 0
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
])
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
])
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
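# Added usage sketch (illustrative, not part of the test file): the high-level
# entry point mirrors what the slow tests above exercise.
#
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# for det in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
#     print(det["label"], det["score"], det["box"])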
| 360
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
        return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns. Test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
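# Added note (illustrative, not in the original tests): each rollout iteration
# above first pads a zero action/reward slot for the model to fill in. A minimal
# sketch of that padding step, assuming batch-size-1 tensors:
def _pad_rollout_step(actions, rewards, act_dim, device):
    actions = torch.cat([actions, torch.zeros(1, 1, act_dim, device=device)], dim=1)
    rewards = torch.cat([rewards, torch.zeros(1, 1, device=device)], dim=1)
    return actions, rewards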
| 63
| 0
|
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
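# Added cross-check (illustrative, not in the original): with x = a + d, y = a,
# z = a - d, the equation x**2 - y**2 - z**2 = n reduces to n = a * (4*d - a),
# which is what the sieve above exploits. A direct divisor-based count for
# sanity-checking small cases:
def solutions_for(n: int) -> int:
    count = 0
    for a in range(1, n + 1):
        if n % a == 0:
            q = n // a + a  # equals 4*d whenever (a, d) solves the equation
            if q % 4 == 0 and q // 4 < a:  # d integral and z = a - d > 0
                count += 1
    return count

assert solutions_for(27) == 2  # value given in the problem statement
assert solutions_for(1155) == 10  # least n with exactly ten solutions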
| 336
|
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
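# Added note (illustrative, not in the original): both functions above pop from
# the front of a plain list, which is O(n) per pop. collections.deque keeps the
# same traversal order with O(1) pops; a minimal sketch for the distance variant:
from collections import deque

def bfs_shortest_path_distance_deque(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = deque([start])
    dist = {start: 0}
    while queue:
        node = queue.popleft()
        for adjacent in graph[node]:
            if adjacent not in dist:
                dist[adjacent] = dist[node] + 1
                if adjacent == target:
                    return dist[adjacent]
                queue.append(adjacent)
    return -1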
| 121
| 0
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
    def all_nodes(self):
        return list(self.graph)
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
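# Added usage sketch (illustrative, not in the original): exercising the
# directed class above on a tiny three-node cycle.
if __name__ == "__main__":
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)  # closes the cycle 0 -> 1 -> 2 -> 0
    print(g.all_nodes())   # [0, 1, 2]
    print(g.dfs())         # depth-first visit order starting from node 0
    print(g.in_degree(0))  # 1 (only the edge 2 -> 0 points at node 0)
    print(g.has_cycle())   # True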
| 135
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': NezhaModel,
            'fill-mask': NezhaForMaskedLM,
            'question-answering': NezhaForQuestionAnswering,
            'text-classification': NezhaForSequenceClassification,
            'token-classification': NezhaForTokenClassification,
            'zero-shot': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
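# Added usage sketch (illustrative, not part of the original tests): running the
# checkpoint used above outside the test harness. The tokenizer choice is an
# assumption (the repo ships a BERT-style vocabulary).
#
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
# model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
# inputs = tokenizer("我爱北京", return_tensors="pt")
# outputs = model(**inputs)
# print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])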
| 135
| 1
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                " The training dataset will be truncated in block of this size for training."
                " Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from"
            " another script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
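Because the script is driven entirely by HfArgumentParser, it can also be exercised programmatically. A hedged sketch follows; the model name and file paths are placeholders, and the script file name is an assumption:

import sys

# Simulate a CLI invocation; HfArgumentParser reads sys.argv.
sys.argv = [
    "run_language_modeling.py",  # assumed file name
    "--model_name_or_path", "bert-base-chinese",
    "--do_train",
    "--train_data_file", "/path/to/train.txt",  # placeholder path
    "--mlm",
    "--line_by_line",
    "--output_dir", "./lm_output",
]
main()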
| 79
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` identifies the template; the schemas describe the expected dataset columns.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
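A quick usage sketch for the template above (hedged: the in-memory dataset is a placeholder, and it assumes the class is in scope as LanguageModeling):

from datasets import Dataset

# Map a dataset whose text lives in a "content" column onto the template's schema.
template = LanguageModeling(text_column="content")
ds = Dataset.from_dict({"content": ["hello world", "foo bar"]})
ds = ds.rename_columns(template.column_mapping)  # column_mapping == {"content": "text"}
print(ds.column_names)  # ['text']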
| 46
| 0
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the default level, overridable with the DATASETS_VERBOSITY environment variable."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute lookup."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
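A short usage sketch for the helpers above (hedged: it assumes the module is importable as `logging_utils`, and the logger name is a placeholder):

import logging_utils

logging_utils.set_verbosity_info()  # root library logger now emits INFO and above
logger = logging_utils.get_logger("demo")
logger.info("current verbosity: %s", logging_utils.get_verbosity())

logging_utils.disable_progress_bar()  # subsequent tqdm calls become no-ops
for _ in logging_utils.tqdm(range(3)):
    pass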
| 88
|
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute the product u * (u - 1) * ... * (u - p + 1) used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
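As a non-interactive check of the same formula, here is a hedged sketch interpolating f(x) = x**2 at x = 2 from samples at x = 0..3 (exact answer 4.0); it reuses ucal and math from above, and the sample data is illustrative:

# Build the forward difference table for samples of f(x) = x**2.
x = [0, 1, 2, 3]
fx = [0.0, 1.0, 4.0, 9.0]
n = len(x)
y = [[0.0] * n for _ in range(n)]
for i in range(n):
    y[i][0] = fx[i]
for i in range(1, n):
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

value = 2
u = (value - x[0]) / (x[1] - x[0])
summ = y[0][0]
for i in range(1, n):
    summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
print(summ)  # 4.0 up to floating-point error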
| 88
| 1
|