| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class __magic_name__ ( enum.Enum ):
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 1
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''generated'''
def __init__( self :Any , *snake_case :Any , **snake_case :List[str] ):
'''simple docstring'''
super().__init__(*snake_case , **snake_case )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Tuple=None , snake_case :List[str]=None , snake_case :Dict=None , snake_case :str=None , snake_case :List[Any]=None , snake_case :Dict=None , **snake_case :Optional[int] , ):
'''simple docstring'''
A_ : str = {}
if truncation is not None:
A_ : str = truncation
A_ : Optional[Any] = generate_kwargs
A_ : Tuple = {}
if return_tensors is not None and return_type is None:
A_ : Dict = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
A_ : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
A_ : Any = clean_up_tokenization_spaces
if stop_sequence is not None:
A_ : Optional[Any] = self.tokenizer.encode(snake_case , add_special_tokens=snake_case )
if len(snake_case ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
A_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int , snake_case :int , snake_case :int ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE ( self :str , *snake_case :Dict , snake_case :Optional[Any] ):
'''simple docstring'''
A_ : Any = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , snake_case ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
A_ : Tuple = ([prefix + arg for arg in args[0]],)
A_ : Any = True
elif isinstance(args[0] , snake_case ):
A_ : Optional[int] = (prefix + args[0],)
A_ : List[Any] = False
else:
raise ValueError(
f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`" )
A_ : int = self.tokenizer(*snake_case , padding=snake_case , truncation=snake_case , return_tensors=self.framework )
# This is produced by tokenizers but is not a valid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self :int , *snake_case :Dict , **snake_case :Any ):
'''simple docstring'''
A_ : Optional[int] = super().__call__(*snake_case , **snake_case )
if (
isinstance(args[0] , snake_case )
and all(isinstance(snake_case , snake_case ) for el in args[0] )
and all(len(snake_case ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Optional[Any] , snake_case :Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **snake_case :Optional[int] ):
'''simple docstring'''
A_ : Dict = self._parse_and_tokenize(snake_case , truncation=snake_case , **snake_case )
return inputs
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , **snake_case :Optional[int] ):
'''simple docstring'''
if self.framework == "pt":
A_ , A_ : List[Any] = model_inputs["input_ids"].shape
elif self.framework == "tf":
A_ , A_ : Any = tf.shape(model_inputs["input_ids"] ).numpy()
A_ : List[Any] = generate_kwargs.get("min_length" , self.model.config.min_length )
A_ : int = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(snake_case , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
A_ : Any = self.model.generate(**snake_case , **snake_case )
A_ : str = output_ids.shape[0]
if self.framework == "pt":
A_ : Any = output_ids.reshape(snake_case , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
A_ : List[str] = tf.reshape(snake_case , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Tuple , snake_case :Optional[int]=ReturnType.TEXT , snake_case :List[str]=False ):
'''simple docstring'''
A_ : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A_ : Union[str, Any] = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
A_ : Dict = {
f"{self.return_name}_text": self.tokenizer.decode(
snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , )
}
records.append(snake_case )
return records
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''summary'''
def __call__( self :List[Any] , *snake_case :Optional[Any] , **snake_case :List[str] ):
'''simple docstring'''
return super().__call__(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :int , snake_case :int , snake_case :int ):
'''simple docstring'''
if max_length < min_length:
logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}." )
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" )
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''translation'''
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :int , snake_case :int ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"Your input_length: {input_length} is bigger than 0.9 * max_length: {0.9 * max_length}. You might consider "
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def SCREAMING_SNAKE_CASE ( self :int , *snake_case :Optional[int] , snake_case :Tuple=TruncationStrategy.DO_NOT_TRUNCATE , snake_case :Optional[int]=None , snake_case :Dict=None ):
'''simple docstring'''
if getattr(self.tokenizer , "_build_translation_inputs" , snake_case ):
return self.tokenizer._build_translation_inputs(
*snake_case , return_tensors=self.framework , truncation=snake_case , src_lang=snake_case , tgt_lang=snake_case )
else:
return super()._parse_and_tokenize(*snake_case , truncation=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Union[str, Any]=None , snake_case :List[str]=None , **snake_case :int ):
'''simple docstring'''
A_ , A_ , A_ : int = super()._sanitize_parameters(**snake_case )
if src_lang is not None:
A_ : List[Any] = src_lang
if tgt_lang is not None:
A_ : List[str] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A_ : Tuple = kwargs.get("task" , self.task )
A_ : int = task.split("_" )
if task and len(snake_case ) == 4:
# translation, XX, to YY
A_ : Optional[int] = items[1]
A_ : str = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self :Any , *snake_case :List[str] , **snake_case :int ):
'''simple docstring'''
return super().__call__(*snake_case , **snake_case )
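A minimal usage sketch for the three pipelines above, assuming the standard `transformers.pipeline` factory and the public `t5-small` checkpoint:

from transformers import pipeline

summarizer = pipeline("summarization", model="t5-small")
print(summarizer("Very long article text ...", min_length=5, max_length=20))

# A task name of the form "translation_xx_to_yy" splits into four parts, which
# is how the _sanitize_parameters override above recovers src_lang and tgt_lang.
translator = pipeline("translation_en_to_de", model="t5-small")
print(translator("How old are you?"))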
| 300
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
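A quick worked example of the check above (a polygon exists only if the largest side is shorter than the sum of the others):

assert check_polygon([3, 4, 5]) is True    # a 3-4-5 triangle: 5 < 3 + 4
assert check_polygon([1, 1, 3]) is False   # no polygon possible: 3 >= 1 + 1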
| 300
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
_UpperCAmelCase : List[str] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
_UpperCAmelCase : List[Any] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Dict = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=9.0 , num_inference_steps=2_0 , output_type="np" )
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
_UpperCAmelCase : str = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
_UpperCAmelCase : Dict = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : Dict = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=9.0 , num_inference_steps=2_0 , output_type="np" )
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : str = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
_UpperCAmelCase : Optional[int] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
_UpperCAmelCase : Tuple = "A painting of a squirrel eating a burger"
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=1_5 , output_type="np" , use_karras_sigmas=__lowerCamelCase , )
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : str = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
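Each test above fingerprints the output by comparing a 3x3 corner patch of the generated image against hard-coded reference values; a minimal sketch of that pattern, assuming any (1, 512, 512, 3) output array:

import numpy as np

image = np.zeros((1, 512, 512, 3))        # stand-in for output.images
image_slice = image[0, -3:, -3:, -1]      # bottom-right 3x3 patch, last channel
expected_slice = np.zeros(9)              # reference values recorded once per test
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2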
| 366
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__a = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : List[Any]=1_8 , lowerCAmelCase__ : str=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_0, "width": 2_0}
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Optional[Any] = image_size
_UpperCAmelCase : Dict = min_resolution
_UpperCAmelCase : str = max_resolution
_UpperCAmelCase : List[Any] = size
_UpperCAmelCase : Union[str, Any] = do_normalize
_UpperCAmelCase : Optional[Any] = do_convert_rgb
_UpperCAmelCase : str = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
_UpperCAmelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
_UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any = Pix2StructImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = Pix2StructImageProcessingTester(self )
@property
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.image_processor_tester.prepare_dummy_image()
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
_UpperCAmelCase : str = 2_0_4_8
_UpperCAmelCase : Any = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : List[str] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : str = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : Union[str, Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_UpperCAmelCase : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowerCAmelCase__ ):
_UpperCAmelCase : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
_UpperCAmelCase : Any = "Hello"
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
_UpperCAmelCase : Any = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Union[str, Any] = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : str = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = Pix2StructImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Any = Pix2StructImageProcessingTester(self , num_channels=4 )
_UpperCAmelCase : List[Any] = 3
@property
def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) )
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Any = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Tuple = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
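For reference, the expected_hidden_dim used throughout these tests works out as follows with the default settings (16x16 patches, 3 channels); the extra 2 covers the row and column index that Pix2Struct prepends to every flattened patch:

patch_height, patch_width, num_channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2  # = 770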
| 17
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
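A minimal sketch of the lazy-import mechanism this module relies on (the real _LazyModule in transformers.utils is more elaborate): attribute access on the module triggers the actual submodule import and caches the result.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._attr_to_submodule[attr]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value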
| 225
|
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 225
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = ReformerTokenizer
lowerCamelCase__ = ReformerTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = True
def __a ( self ) -> List[Any]:
super().setUp()
lowerCAmelCase_ = ReformerTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> int:
lowerCAmelCase_ = "<s>"
lowerCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __a ( self ) -> int:
lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_a ) , 1000 )
def __a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Dict:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "I was born in 92000, and this is falsé."
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self , _a=15 ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
lowerCAmelCase_ = "This is a simple input"
lowerCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ = ("This is a simple input", "This is a pair")
lowerCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , )
def __a ( self ) -> str:
pass
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = ReformerTokenizer(_a , keep_accents=_a )
lowerCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __a ( self ) -> str:
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "Hello World!"
lowerCAmelCase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __a ( self ) -> str:
lowerCAmelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowerCAmelCase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@require_torch
@slow
def __a ( self ) -> Tuple:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowerCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase_ = " ".join(_a )
lowerCAmelCase_ = self.big_tokenizer.encode_plus(_a , return_tensors="pt" )
lowerCAmelCase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
lowerCAmelCase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowerCAmelCase_ = encoded_sequence["input_ids"].shape
lowerCAmelCase_ = ReformerModel(_a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_a )
model(**_a )
@slow
def __a ( self ) -> List[str]:
# fmt: off
lowerCAmelCase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowerCAmelCase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=_a , sequences=_a , )
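A minimal usage sketch for the tokenizer under test, assuming access to the public google/reformer-crime-and-punishment checkpoint; the expected ids come from the integration test above:

from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer.encode("Hello World!")
print(ids)                    # [126, 32, 262, 152, 38, 72, 287]
print(tokenizer.decode(ids))  # should round-trip back to the original text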
| 22
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : Optional[Any] , *__a : int , **__a : str ):
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 1
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case : Union[str, Any] = "▁"
_snake_case : Any = {"vocab_file": "prophetnet.tokenizer"}
_snake_case : Tuple = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
_snake_case : Any = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
_snake_case : Any = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Union[str, Any] = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
__snake_case : Dict = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
__snake_case : Optional[int] = token.rstrip("\n" )
__snake_case : Union[str, Any] = index
return vocab
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[Any]="[SEP]" , lowerCamelCase : List[str]="[SEP]" , lowerCamelCase : Dict="[SEP]" , lowerCamelCase : Optional[int]="[UNK]" , lowerCamelCase : Dict="[PAD]" , lowerCamelCase : str="[CLS]" , lowerCamelCase : str="[MASK]" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : int , ) -> None:
__snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
__snake_case : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__snake_case : List[str] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
__snake_case : List[str] = F'[unused{i}]'
__snake_case : int = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__snake_case : Any = 12
__snake_case : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowerCamelCase )
def __getstate__( self : Union[str, Any] ) -> Union[str, Any]:
__snake_case : Union[str, Any] = self.__dict__.copy()
__snake_case : int = None
return state
def __setstate__( self : Tuple , lowerCamelCase : List[str] ) -> Tuple:
__snake_case : Tuple = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : Tuple = {}
__snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case ( self : Dict , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return ([0] * len(lowerCamelCase )) + [1]
return ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) + [1]
def __snake_case ( self : Any , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
__snake_case : str = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self : str ) -> str:
return len(self.sp_model ) + self.fairseq_offset
def __snake_case ( self : Any ) -> int:
__snake_case : Dict = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self : str , lowerCamelCase : str ) -> str:
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def __snake_case ( self : List[str] , lowerCamelCase : Dict ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case : List[Any] = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case ( self : Dict , lowerCamelCase : Optional[int] ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case ( self : int , lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
__snake_case : str = "".join(lowerCamelCase ).replace(lowerCamelCase , " " ).strip()
return out_string
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case : List[str] = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , "wb" ) as fi:
__snake_case : Any = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__snake_case : List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
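The vocab-alignment comment above boils down to a fixed offset: an spm piece id maps to the embedding id by adding fairseq_offset (12 here, so spm id 3, the first "real" token ",", lands at embedding id 15, past the five special tokens and ten [unusedN] slots), with spm id 0 reserved for unknown pieces. A minimal sketch of that mapping, with hypothetical names:

FAIRSEQ_OFFSET = 12
UNK_TOKEN_ID = 3  # "[UNK]" per the special-token table in __init__ above

def spm_id_to_embedding_id(spm_id: int) -> int:
    # SentencePiece returns 0 for unknown pieces; route that to [UNK].
    return spm_id + FAIRSEQ_OFFSET if spm_id else UNK_TOKEN_ID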
| 123
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = "Salesforce/blip-image-captioning-base"
lowerCAmelCase__ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
lowerCAmelCase__ = "image_captioner"
lowerCAmelCase__ = AutoModelForVision2Seq
lowerCAmelCase__ = ["image"]
lowerCAmelCase__ = ["text"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.pre_processor(images=UpperCAmelCase , return_tensors="pt" )
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.model.generate(**UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
return self.pre_processor.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )[0].strip()
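A minimal usage sketch for the tool above; the class name ImageCaptioningTool is an assumption about how this file exposes it, and the BLIP checkpoint downloads on first use:

from PIL import Image

tool = ImageCaptioningTool()  # hypothetical public name for the class above
caption = tool(Image.open("photo.jpg"))
print(caption)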
| 351
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 16 , UpperCAmelCase = 88 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 32 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = True , UpperCAmelCase = True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase_ = num_attention_heads
lowercase_ = attention_head_dim
lowercase_ = num_attention_heads * attention_head_dim
lowercase_ = in_channels
lowercase_ = torch.nn.GroupNorm(num_groups=UpperCAmelCase , num_channels=UpperCAmelCase , eps=1e-6 , affine=UpperCAmelCase )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
# 3. Define transformers blocks
lowercase_ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dropout=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , activation_fn=UpperCAmelCase , attention_bias=UpperCAmelCase , double_self_attention=UpperCAmelCase , norm_elementwise_affine=UpperCAmelCase , )
for d in range(UpperCAmelCase )
] )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase = True , ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = hidden_states.shape
lowercase_ = batch_frames // num_frames
lowercase_ = hidden_states
lowercase_ = hidden_states[None, :].reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowercase_ = self.norm(UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase , UpperCAmelCase )
lowercase_ = self.proj_in(UpperCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowercase_ = block(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , class_labels=UpperCAmelCase , )
# 3. Output
lowercase_ = self.proj_out(UpperCAmelCase )
lowercase_ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowercase_ = hidden_states.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase )
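Shape bookkeeping for the forward pass above, assuming a hypothetical input of 2 videos of 8 frames each stacked on the batch axis, with 64 channels at 32x32 resolution:

batch_frames, channel, height, width = 16, 64, 32, 32   # hidden_states.shape
num_frames = 8
batch_size = batch_frames // num_frames                  # 2
# (16, 64, 32, 32) -> (2, 8, 64, 32, 32) -> permute -> (2, 64, 8, 32, 32)
# -> reshape to (2 * 32 * 32, 8, 64): self-attention then runs over the frame
# axis independently at every spatial location, before the inverse reshape.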
| 297
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase : Optional[int] ="convnextv2"
def __init__( self : int , a : Dict=3 , a : List[str]=4 , a : str=4 , a : Dict=None , a : List[str]=None , a : Tuple="gelu" , a : Tuple=0.02 , a : Dict=1e-1_2 , a : Any=0.0 , a : List[str]=2_24 , a : Optional[Any]=None , a : int=None , **a : Any , ):
"""simple docstring"""
super().__init__(**a )
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = num_stages
__lowerCamelCase = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
__lowerCamelCase = [3, 3, 9, 3] if depths is None else depths
__lowerCamelCase = hidden_act
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = drop_path_rate
__lowerCamelCase = image_size
__lowerCamelCase = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__lowerCamelCase , __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=a , out_indices=a , stage_names=self.stage_names )
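A minimal usage sketch, assuming the public name ConvNextV2Config in transformers; the defaults reproduce the values filled in above:

from transformers import ConvNextV2Config

config = ConvNextV2Config()   # depths [3, 3, 9, 3], hidden sizes [96, 192, 384, 768]
print(config.stage_names)     # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']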
| 67
|
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model that reuses a pretrained config and tokenizer."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
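Example invocation via python-fire, which maps positional command-line arguments onto the function parameters (the file name is hypothetical):

# python save_randomly_initialized_model.py t5-small ./t5-small-random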
| 252
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a :List[str] = logging.get_logger(__name__)
a :Union[str, Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __a (UpperCamelCase_ , UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = """bit"""
_SCREAMING_SNAKE_CASE :Dict = ["""preactivation""", """bottleneck"""]
_SCREAMING_SNAKE_CASE :Dict = ["""SAME""", """VALID"""]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1_024, 2_048] , _a=[3, 4, 6, 3] , _a="preactivation" , _a="relu" , _a=None , _a=32 , _a=0.0 , _a=False , _a=32 , _a=1 , _a=None , _a=None , **_a , ) -> str:
"""simple docstring"""
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
SCREAMING_SNAKE_CASE__ : Any = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : str = embedding_size
SCREAMING_SNAKE_CASE__ : str = hidden_sizes
SCREAMING_SNAKE_CASE__ : Tuple = depths
SCREAMING_SNAKE_CASE__ : List[str] = layer_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = global_padding
SCREAMING_SNAKE_CASE__ : int = num_groups
SCREAMING_SNAKE_CASE__ : Any = drop_path_rate
SCREAMING_SNAKE_CASE__ : Dict = embedding_dynamic_padding
SCREAMING_SNAKE_CASE__ : List[str] = output_stride
SCREAMING_SNAKE_CASE__ : Dict = width_factor
SCREAMING_SNAKE_CASE__ : str = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
SCREAMING_SNAKE_CASE__ : str = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
| 352
|
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
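A worked trace of the algorithm: euclidean_gcd(48, 18) steps through (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6.

assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6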
| 56
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
_a = '''▁'''
_a = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_a = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
_a = {
'''facebook/xglm-564M''': 2048,
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_UpperCAmelCase = 7
_UpperCAmelCase = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
_UpperCAmelCase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase ))
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase ))
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ''.join(UpperCAmelCase ).replace(UpperCAmelCase , ' ' ).strip()
return out_string
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
| 39
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are almost colinear: plain lerp is numerically safer
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
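# --- Illustrative sketch (not part of the original file) ---
# slerp follows the great circle between its endpoints, so interpolating
# between two orthogonal unit vectors keeps unit norm, whereas a plain lerp
# midpoint would shrink to norm ~0.707.  Values are arbitrary demo inputs.
def _demo_slerp():
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    midpoint = slerp(0.5, v0, v1)  # ~[0.7071, 0.7071], norm ~1.0
    return midpoint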
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
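# --- Illustrative sketch (not part of the original file) ---
# spherical_dist_loss is the squared great-circle distance between the
# normalized embeddings: 0 for parallel vectors and growing as they diverge.
def _demo_spherical_dist_loss():
    x = torch.tensor([[1.0, 0.0]])
    y = torch.tensor([[0.0, 1.0]])
    # chord length sqrt(2) -> 2 * arcsin(sqrt(2)/2)**2 = 2 * (pi/4)**2 ≈ 1.2337
    return spherical_dist_loss(x, y)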
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    """CLIP-guided pipeline that mixes a content image and a style image."""
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None, ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model,
            coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # add noise to the initial latents at the given timestep
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale, ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(self, style_image: Union[torch.FloatTensor, PIL.Image.Image], content_image: Union[torch.FloatTensor, PIL.Image.Image], style_prompt: Optional[str] = None, content_prompt: Optional[str] = None, height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1, slerp_clip_image_style_strength: float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
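# --- Illustrative usage sketch (not part of the original file) ---
# Rough shape of a call to the images-mixing pipeline above, assuming `pipe`
# was assembled with the components listed in __init__ (including the
# optional CoCa captioner).  All argument values here are placeholders,
# not recommendations.
def _demo_images_mixing_call(pipe, content_image, style_image):
    result = pipe(
        style_image=style_image,
        content_image=content_image,
        style_prompt=None,    # captioned by CoCa when None
        content_prompt=None,  # captioned by CoCa when None
        num_inference_steps=50,
        clip_guidance_scale=100,
        slerp_latent_style_strength=0.8,
        generator=torch.Generator("cpu").manual_seed(0),
    )
    return result.images[0]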
| 17
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # a single random CHW uint8 array, converted to a PIL image
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Dict = self.get_tokenizer()
__lowerCAmelCase : List[str] = self.get_rust_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_image_processor()
__lowerCAmelCase : Dict = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase )
__lowerCAmelCase : str = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowerCAmelCase : List[Any] = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
__lowerCAmelCase : int = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
__lowerCAmelCase : Optional[int] = image_processor(lowerCAmelCase , return_tensors="""np""" )
__lowerCAmelCase : Any = processor(images=lowerCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowerCAmelCase : int = self.get_image_processor()
__lowerCAmelCase : List[Any] = self.get_tokenizer()
__lowerCAmelCase : str = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowerCAmelCase : List[Any] = """lower newer"""
__lowerCAmelCase : str = processor(text=lowerCAmelCase )
__lowerCAmelCase : Tuple = tokenizer(lowerCAmelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : str = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowerCAmelCase : Any = """lower newer"""
__lowerCAmelCase : List[str] = self.prepare_image_inputs()
__lowerCAmelCase : List[str] = processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : List[str] = self.get_tokenizer()
__lowerCAmelCase : List[str] = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : Dict = processor.batch_decode(lowerCAmelCase )
__lowerCAmelCase : Tuple = tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.get_image_processor()
__lowerCAmelCase : Dict = self.get_tokenizer()
__lowerCAmelCase : Any = AlignProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowerCAmelCase : List[Any] = """lower newer"""
__lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
__lowerCAmelCase : List[Any] = processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 362
|
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
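# --- Illustrative sketch (not part of the original file) ---
# The classical "sifting" step of BB84 in isolation: only positions where
# Alice's and Bob's randomly chosen bases agree contribute key bits.
def _demo_bb84_sifting():
    alice_basis = [0, 1, 1, 0, 1]
    bob_basis = [0, 0, 1, 0, 1]
    measured_bits = "10110"  # stand-in for the simulator's measurement string
    sifted = "".join(
        bit for a, b, bit in zip(alice_basis, bob_basis, measured_bits) if a == b
    )
    return sifted  # "1110": index 1 is dropped because the bases differ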
| 139
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization: every character is its own token
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
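# --- Illustrative sketch (not part of the original file) ---
# The tokenizer above is purely character-level: _tokenize splits the text
# into single characters and each one is looked up in the JSON vocab.  The
# tiny vocab here is hypothetical.
def _demo_char_tokenization():
    vocab = {"[GO]": 0, "a": 1, "b": 2}
    text = "abba"
    tokens = list(text)                                   # ['a', 'b', 'b', 'a']
    ids = [vocab.get(t, vocab["[GO]"]) for t in tokens]   # [1, 2, 2, 1]; [GO] doubles as unk
    return tokens, ids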
| 22
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image: Union[torch.Tensor, PIL.Image.Image] = None, batch_size: Optional[int] = 1, num_inference_steps: Optional[int] = 100, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
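# --- Illustrative usage sketch (not part of the original file) ---
# Rough shape of a call to the latent super-resolution pipeline above.
# `pipeline` is an instance of the class defined above and `low_res_img`
# a small PIL image; the argument values are placeholders.
def _demo_upscale(pipeline, low_res_img):
    result = pipeline(image=low_res_img, num_inference_steps=100, eta=1.0)
    return result.images[0]  # upscaled PIL image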
| 22
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # redirect every cache the library touches into pytest's base temp dir
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 277
|
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that flags a callable as experimental and warns on each call."""
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
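# --- Illustrative usage sketch (not part of the original file) ---
# Decorating a function leaves its behavior unchanged but emits a warning
# on every call.
@experimental
def _demo_api(x: int) -> int:
    return x + 1
# _demo_api(1) returns 2 after warning:
# "'_demo_api' is experimental and might be subject to breaking changes in the future."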
| 277
| 1
|
"""simple docstring"""
def binary_count_setbits(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer.

    >>> binary_count_setbits(25)
    3
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
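# --- Illustrative sketch (not part of the original file) ---
# bin(25) == '0b11001', so counting '1' characters yields 3 set bits.
def _demo_set_bits():
    return binary_count_setbits(25)  # -> 3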
| 33
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 297
| 0
|
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
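# --- Illustrative sketch (not part of the original file) ---
# Assumed shape of one record in the raw DPR file and the two lines the
# script writes for it (one to the evaluation set, one to the gold file).
_EXAMPLE_DPR_RECORD = {
    "question": "who sings does he love me with reba",
    "positive_ctxs": [{"title": "Does He Love You", "text": "..."}],
}
# evaluation_set line: "who sings does he love me with reba"
# gold_data_path line: "Does He Love You" (titles joined by tabs)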
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62
| 0
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Memoized recursive factorial.

    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
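# --- Illustrative sketch (not part of the original file) ---
# lru_cache memoizes each factorial(n): the recursion does linear work on the
# first call and O(1) on repeats, which cache_info() makes visible.
def _demo_factorial_cache():
    assert factorial(5) == 120
    return factorial.cache_info()  # hits/misses exposed by functools.lru_cache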
| 59
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : Tuple ):
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
snake_case_ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case_ = CLIPTextModel(lowercase_ )
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Any , lowercase_ : int , lowercase_ : Optional[Any]=0 ):
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ = image / 2 + 0.5
if str(lowercase_ ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(lowercase_ )
else:
snake_case_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def A_ ( self : Union[str, Any] ):
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = CycleDiffusionPipeline(**lowercase_ )
snake_case_ = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs(lowercase_ )
snake_case_ = pipe(**lowercase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase_ , '''half''' ):
snake_case_ = module.half()
snake_case_ = CycleDiffusionPipeline(**lowercase_ )
snake_case_ = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs(lowercase_ )
snake_case_ = pipe(**lowercase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def A_ ( self : List[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def A_ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Union[str, Any] ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = '''CompVis/stable-diffusion-v1-4'''
snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' )
snake_case_ = CycleDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case_ = '''A black colored car'''
snake_case_ = '''A blue colored car'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A_ ( self : List[str] ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = '''CompVis/stable-diffusion-v1-4'''
snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' )
snake_case_ = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case_ = '''A black colored car'''
snake_case_ = '''A blue colored car'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 56
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        # with return_dict=False the pipeline returns a plain tuple instead of an
        # output dataclass, so the generated images live at output[0]
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
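# Note on the shape assertions above: each spectrogram column decodes to
# hop_length audio samples, so a width-W spectrogram image corresponds to
# (W - 1) * hop_length samples -- the audio-length and image-size checks
# mirror each other.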
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
|
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if the partition for the given integer is perfect,
    i.e. if the derived exponent is a whole number."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Find the first partition value for which the proportion of perfect
    partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return partition_candidate
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
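# Why the log2 test works (derivation added for clarity): the exponent is a
# whole number exactly when sqrt(4n + 1) = 2**(e + 1) - 1 for some integer e,
# i.e. when n = ((2**(e + 1) - 1) ** 2 - 1) / 4 = 2**e * (2**e - 1), so the
# perfect-partition check reduces to a power-of-two test.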
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
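        # Hand-run of the toy merge table (illustrative): "adapt" is rebuilt via
        # "a p" -> "ap t</w>" -> "a d" -> "ad apt</w>", while "react" only matches
        # "r e", so it falls apart into the subwords "re@@ a@@ c@@ t".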
|
"""
Gradient descent for minimizing the cost of a linear hypothesis function.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis; parameter_vector[0] is the bias term."""
    hyp_val = 0
    for i in range(len(data_input_tuple)):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Predicted (hypothesis) value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the error terms; for index >= 0 each term is scaled by that feature."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Derivative of the cost function w.r.t. the parameter at index + 1."""
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
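# For contrast, a vectorized sketch of the same batch update using numpy arrays
# (an illustrative rewrite, not part of the original module; the function name
# and shapes are assumptions):
def gradient_descent_step(theta, x, y, lr):
    """One batch gradient-descent step for linear regression with a bias term.

    theta: (d + 1,) parameters, theta[0] is the bias
    x:     (m, d) feature matrix
    y:     (m,) targets
    """
    errors = (x @ theta[1:] + theta[0]) - y            # hypothesis minus targets
    grad_bias = errors.mean()                          # derivative w.r.t. theta[0]
    grad_weights = (x * errors[:, None]).mean(axis=0)  # derivative w.r.t. theta[1:]
    return theta - lr * numpy.concatenate(([grad_bias], grad_weights))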
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase__ = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"
def __init__( self : Optional[int] , a : List[str] ):
'''simple docstring'''
if type(a ) == dict:
lowerCAmelCase__ : List[str] = Namespace(**a )
lowerCAmelCase__ : int = import_module('tasks' )
try:
lowerCAmelCase__ : Any = getattr(a , hparams.task_type )
lowerCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowerCAmelCase__ : List[str] = self.token_classification_task.get_labels(hparams.labels )
lowerCAmelCase__ : Optional[int] = CrossEntropyLoss().ignore_index
super().__init__(a , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
return self.model(**a )
def _lowerCamelCase ( self : Any , a : Any , a : int ):
'''simple docstring'''
lowerCAmelCase__ : int = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowerCAmelCase__ : int = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCAmelCase__ : List[Any] = self(**a )
lowerCAmelCase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
lowerCAmelCase__ : int = self._feature_file(a )
if os.path.exists(a ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , a )
lowerCAmelCase__ : Any = torch.load(a )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
lowerCAmelCase__ : str = self.token_classification_task.read_examples_from_file(args.data_dir , a )
lowerCAmelCase__ : str = self.token_classification_task.convert_examples_to_features(
a , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=a , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('Saving features into cached file %s' , a )
torch.save(a , a )
def _lowerCamelCase ( self : str , a : int , a : int , a : bool = False ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self._feature_file(a )
logger.info('Loading features from cached file %s' , a )
lowerCAmelCase__ : str = torch.load(a )
lowerCAmelCase__ : Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCAmelCase__ : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCAmelCase__ : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCAmelCase__ : Dict = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(a , a , a , a ) , batch_size=a )
def _lowerCamelCase ( self : int , a : Union[str, Any] , a : Optional[int] ):
        """Compute validation"""
lowerCAmelCase__ : Optional[int] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowerCAmelCase__ : str = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCAmelCase__ : Tuple = self(**a )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = outputs[:2]
lowerCAmelCase__ : Dict = logits.detach().cpu().numpy()
lowerCAmelCase__ : List[str] = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[Any] , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.stack([x['val_loss'] for x in outputs] ).mean()
lowerCAmelCase__ : Optional[Any] = np.concatenate([x['pred'] for x in outputs] , axis=0 )
lowerCAmelCase__ : Dict = np.argmax(a , axis=2 )
lowerCAmelCase__ : str = np.concatenate([x['target'] for x in outputs] , axis=0 )
lowerCAmelCase__ : Optional[Any] = dict(enumerate(self.labels ) )
lowerCAmelCase__ : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowerCAmelCase__ : int = {
'val_loss': val_loss_mean,
'accuracy_score': accuracy_score(a , a ),
'precision': precision_score(a , a ),
'recall': recall_score(a , a ),
            'f1': f1_score(a , a ),
}
lowerCAmelCase__ : Optional[Any] = dict(results.items() )
lowerCAmelCase__ : Optional[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : Any , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = self._eval_end(a )
lowerCAmelCase__ : Any = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[str] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = self._eval_end(a )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCAmelCase__ : int = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( a : List[Any] , a : Optional[Any] ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(a , a )
parser.add_argument(
'--task_type' , default='NER' , type=a , help='Task type to fine tune in training (e.g. NER, POS, etc)' )
parser.add_argument(
'--max_seq_length' , default=128 , type=a , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--labels' , default='' , type=a , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
parser.add_argument(
'--gpus' , default=0 , type=a , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
    model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
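    # Example invocation (paths and model name are hypothetical; the generic
    # flags come from lightning_base.add_generic_args):
    #   python run_ner.py --data_dir ./data --model_name_or_path bert-base-cased \
    #       --output_dir ./out --task_type NER --do_predict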
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
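# e.g. to_2tuple(224) -> (224, 224), while to_2tuple((224, 196)) passes through unchanged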
@require_tf
class TFVisionTextDualEncoderMixin:
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
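        # e.g. a 224px image with 16px patches yields (224 // 16) ** 2 = 196
        # patches, so each attention map is (num_heads, 197, 197) once the
        # [CLS] token is counted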
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
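        # the assembled command is effectively:
        #   python <test_dir>/xla_spawn.py --num_cores 8 <test_file_path>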
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Optional[Any] = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :int = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
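# Net effect: importing the package stays cheap -- the torch- and
# sentencepiece-backed submodules above are only imported when an attribute
# such as `ReformerModel` is first accessed, via _LazyModule.__getattr__.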
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # Fields whose keys use dashes in the YAML metadata, e.g. train-eval-index
    _FIELDS_WITH_DASHES = {"train_eval_index"}

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
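# Minimal round-trip sketch (illustrative; assumes README.md carries a YAML
# header between two "---" lines):
#
#     metadata = DatasetMetadata.from_readme(Path("README.md"))
#     metadata["pretty_name"] = "My dataset"
#     metadata.to_readme(Path("README.md"))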
_SCREAMING_SNAKE_CASE : Optional[int] = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
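# Usage sketch (illustrative): derived attributes come straight from the
# constructor arguments.
#
#     config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0)
#     config.num_hidden_layers  # sum(num_block_repeats) * 4 == 64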
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowercase : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new) -> None:
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
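# The fused in_proj matrix stacks the query, key and value projections along
# dim 0; with a hidden size of 256, the three 256-row slices above recover the
# separate q/k/v weights that the HF implementation expects.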
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 20
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
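# Public import structure consumed by _LazyModule below; submodules are only
# imported when one of their attributes is first accessed.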
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}
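# Optional backends: each entry below is only registered when its dependency
# (tokenizers, torch or tensorflow) is importable.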
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62
| 0
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
__A : int = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
__A : Optional[Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
__A : List[Any] = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] ,)
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
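    # Note: sklearn's mean_squared_error expects (y_true, y_pred), so references are passed first below.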
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 8
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmva_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_UpperCamelCase )
self.assertListEqual(encoding.boxes ,_UpperCamelCase )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 8
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
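# Maps an original SimMIM Swin checkpoint onto the Hugging Face
# SwinForMaskedImageModeling layout (renaming keys and splitting fused qkv weights).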
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into separate query / key / value tensors
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowerCamelCase : Optional[int] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
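# Shared data structures and feature-conversion helpers for the PyTorch and
# TensorFlow token-classification (NER) examples.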
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later
        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later
        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 276
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = 'xlm-roberta'
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 296
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Optional[Any] ):
'''simple docstring'''
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
A__ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Union[str, Any] = GPTNeoXModel(snake_case )
original_model.to(snake_case )
original_model.eval()
A__ : Optional[int] = original_model(snake_case ).last_hidden_state
A__ : List[str] = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
A__ : Optional[int] = GPTNeoXModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
A__ : List[str] = scaled_model(snake_case ).last_hidden_state
A__ : Tuple = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
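# Slow integration test: greedy generation against a pinned Pythia checkpoint,
# exercised both with and without gradient checkpointing.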
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 296
| 1
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative Depth First Search on a graph given as an adjacency dict.

    :param graph: directed graph in dictionary format
    :param start: starting vertex as a string
    :returns: the set of every vertex reachable from start
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
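# Adjacency-list representation of the demo graph traversed below.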
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
| 307
|
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: returns 1 if both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0
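# Truth table: (0, 0) -> 1, (0, 1) -> 0, (1, 0) -> 0, (1, 1) -> 1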
def test_xnor_gate() -> None:
    """Checks xnor_gate against the full truth table."""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 307
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
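# Pairwise squared Euclidean distances via the expansion
# ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2, which avoids materialising explicit difference tensors.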
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
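# Vector quantisation: map every RGB pixel to the index of its nearest colour cluster.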
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , ):
'''simple docstring'''
__UpperCamelCase = rescale(image=__UpperCAmelCase , scale=1 / 127.5 , data_format=__UpperCAmelCase )
__UpperCamelCase = image - 1
return image
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
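# Minimal usage sketch (palette values and sizes are illustrative assumptions,
# not taken from the original file): with the default 256x256 resize, each
# image is flattened to 256 * 256 cluster ids.
# >>> processor = ImageGPTImageProcessor(clusters=np.random.rand(16, 3) * 2 - 1)
# >>> batch = processor(images=PIL.Image.new("RGB", (32, 32)), return_tensors="np")
# >>> batch["input_ids"].shape
# (1, 65536)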
| 360
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.')[n_shave_prefix_segments:])
    else:
        return ".".join(path.split('.')[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)
        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f'input_blocks.{i}.1',
                'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path],
                attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)
    for i in range(num_output_blocks):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.')[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ['conv.weight', 'conv.bias'] in output_block_list.values():
                index = list(output_block_list.values()).index(['conv.weight', 'conv.bias'])
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.bias'
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    'old': f'output_blocks.{i}.1',
                    'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    f'output_blocks.{i}.1.qkv.bias': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    f'output_blocks.{i}.1.qkv.weight': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any('qkv' in key for key in attentions) else None,
                    config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i), path['old']])
                new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCamelCase : Dict = parser.parse_args()
UpperCamelCase : Optional[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase : int = json.loads(f.read())
UpperCamelCase : Dict = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCamelCase : Optional[Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase : List[Any] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase : Optional[int] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase : Dict = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 263
| 0
|
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
    from doctest import testmod
    testmod()
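    # Illustrative check (these graphs are assumptions, not from the original module):
    assert check_cycle({0: [1], 1: [2], 2: [0]})      # back edge 2 -> 0
    assert not check_cycle({0: [1], 1: [2], 2: []})   # acyclic chain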
| 213
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False)
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    main()
| 213
| 1
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
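# Worked example (values chosen for illustration; negative inputs saturate
# towards -alpha while positive inputs pass through unchanged):
# >>> exponential_linear_unit(np.array([2.3, 0.6, -2, -3.8]), alpha=0.3)
# array([ 2.3       ,  0.6       , -0.25939942, -0.29328877])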
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    """simple docstring"""
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        # get_activation returns distinct instances, so the attribute set on
        # `act1` must not leak onto `act2`.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 292
| 0
|
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    '''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ],
        )
    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float')),
                "references": datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                "predictions": datasets.Value('float'),
                "references": datasets.Value('float'),
            }
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 8
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    '''simple docstring'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]
    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
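# Usage sketch (assumes the sibling HashTable(size_table, charge_factor=...)
# constructor and its insert_data method from .hash_table; values illustrative):
# >>> ht = HashTableWithLinkedList(3, charge_factor=2)
# >>> for value in (10, 20, 30):
# ...     ht.insert_data(value)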
| 8
| 1
|
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 366
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params
        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png')
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params
        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png')
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 80
| 0
|
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE_ = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE_ = re.compile(r"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE_ = re.compile(r"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE_ = re.compile(r"""(_{2,})""")
SCREAMING_SNAKE_CASE_ = r"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE_ = r"""<>:/\|?*"""
def camelcase_to_snakecase(name):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    '''simple docstring'''
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    '''simple docstring'''
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    '''simple docstring'''
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
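# Sharded-output example (paths are illustrative):
# >>> filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
# ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']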
| 296
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "efficientformer"
    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 296
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
    def scale_model_input(self, state, sample, timestep=None):
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape=()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps)
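    # e.g. (illustrative): with num_train_timesteps=1000 and num_inference_steps=10,
    # the resulting schedule is [900, 800, ..., 100, 0].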
    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ):
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                " for the FlaxDDPMScheduler.")
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state, original_samples, noise, timesteps):
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(self, state, sample, noise, timesteps):
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__( self ):
return self.config.num_train_timesteps
| 41
|
from typing import Any
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None
class LinkedList:
    def __init__(self):
        self.head = None
    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()
    def push(self, new_data):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
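    # Expected output (derived from the pushes above):
    #   1 2 3 4 5
    #   After swapping
    #   4 2 3 1 5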
| 41
| 1
|
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """simple docstring"""
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' '):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '\n'.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.')
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """simple docstring"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('.zip') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return values.split(',' )
lowerCAmelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
lowerCAmelCase :List[Any] = parser.parse_args()
lowerCAmelCase :List[str] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
lowerCAmelCase :Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
lowerCAmelCase :Tuple = extract_warnings(args.output_dir, args.targets)
lowerCAmelCase :Tuple = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 331
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    '''simple docstring'''
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 263
| 0
|
def abbr(a: str, b: str) -> bool:
    """simple docstring"""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
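# Classic example (illustrative): "daBcd" can be abbreviated to "ABC" by
# capitalizing 'd' and 'c' and deleting the remaining lowercase 'd'.
# >>> abbr("daBcd", "ABC")
# True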
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 368
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    """simple docstring"""
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """simple docstring"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
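# Quick check (illustrative): the entropy of a uniform two-outcome distribution
# is ln(2) ≈ 0.6931.
# >>> entropy(torch.tensor([0.5, 0.5]))
# tensor(0.6931)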
def print_2d_tensor(tensor):
    """simple docstring"""
    logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """simple docstring"""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info('Head importance scores')
        print_2d_tensor(head_importance)
    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """simple docstring"""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info('Final head mask')
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """simple docstring"""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(model, args.output_dir)
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
"""simple docstring"""
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models')
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    # Other parameters
    parser.add_argument(
        '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name_or_path')
    parser.add_argument(
        '--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name_or_path')
    parser.add_argument(
        '--cache_dir', default=None, type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.')
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory')
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help='Don\'t normalize importance score by layers')
    parser.add_argument(
        '--dont_normalize_global_importance', action='store_true', help='Don\'t normalize all importance scores between 0 and 1')
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask heads until a threshold of accuracy.')
    parser.add_argument(
        '--masking_threshold', default=0.9, type=float, help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).')
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Fraction of heads to mask at each masking step.')
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length', default=128, type=int, help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ))
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
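# Hypothetical invocation sketch (the script filename, data file and
# threshold values below are illustrative, not taken from the original):
# python run_prune_gpt2.py --data_dir tokenized_ids.txt --model_name_or_path gpt2 \
#     --output_dir ./pruned_gpt2 --try_masking --masking_threshold 0.9 --masking_amount 0.1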
| 120
| 0
|
def depth_first_search(grid, row, col, visit) -> int:
    """Count the paths from (row, col) to the bottom-right cell of the grid,
    moving up/down/left/right; cells equal to 1 are walls."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
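# Illustrative usage sketch (grid values are made up): 1-cells are walls, so
# the only route through this 3x3 grid hugs the top and right edges.
# example_grid = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
# print(depth_first_search(example_grid, 0, 0, set()))  # -> 1
# An open 2x2 grid has exactly two routes (right-down and down-right):
# print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # -> 2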
| 65
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """Configuration for the ViViT video transformer model."""

    model_type = "vivit"

    def __init__(self, image_size=2_24, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
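# Minimal usage sketch (the override below is illustrative): the defaults
# reproduce the base ViViT configuration.
# config = VivitConfig(num_frames=16)
# print(config.model_type, config.num_frames)  # vivit 16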
| 292
| 0
|
"""simple docstring"""
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
| 357
|
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity |A n B| / |A u B| for sets, lists or
    tuples; with alternative_union=True the denominator is len(A) + len(B)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
A: List[Any] = {"a", "b", "c", "d", "e"}
A: List[Any] = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
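# With the sets above, the intersection has 3 elements ({"c", "d", "e"}) and
# the union has 8, so the printed similarity is 3 / 8 = 0.375. A sketch of the
# alternative-union variant, whose denominator is len(set_a) + len(set_b) = 11:
# print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 = 0.2727...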
| 76
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys to the Hugging Face naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint, convert its state dict and save it in the
    Hugging Face sharded format."""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size])
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the file PyTorch saves takes the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
_lowerCamelCase : str = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
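# Hypothetical command line (the script filename, repo and checkpoint names
# are illustrative):
# python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-hf --size 169M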
| 14
|
'''simple docstring'''
def sum_digits(num) -> int:
    """Return the sum of the decimal digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n=100) -> int:
    """Digit sum of the numerator of the max_n-th convergent of the continued
    fraction for e (Project Euler 65)."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
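# Sanity check against the worked example in the underlying Project Euler
# problem (assumed here): the 10th convergent of e is 1457/536, so
# solution(10) should return 1 + 4 + 5 + 7 = 17.
# assert solution(10) == 17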
| 80
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : Dict = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 3_84}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_24 / 2_56
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowercase_ : Optional[Any] = size['''shortest_edge''']
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowercase_ : List[Any] = int(shortest_edge / crop_pct )
lowercase_ : Tuple = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = do_resize if do_resize is not None else self.do_resize
lowercase_ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ : Union[str, Any] = resample if resample is not None else self.resample
lowercase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = image_mean if image_mean is not None else self.image_mean
lowercase_ : int = image_std if image_std is not None else self.image_std
lowercase_ : Optional[int] = size if size is not None else self.size
lowercase_ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase_ : Optional[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowercase_ : Optional[Any] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , crop_pct=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase_ : Dict = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
lowercase_ : Any = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
lowercase_ : Any = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
lowercase_ : Dict = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 264
|
'''simple docstring'''
def snake_case_(txt: str) -> list:
    """Return every variant of txt with exactly one alphabetic character
    uppercased.

    >>> snake_case_("hey")
    ['Hey', 'hEy', 'heY']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 264
| 1
|
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]
        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)
        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
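# Hedged usage sketch (checkpoint id and subfolder are illustrative): the
# checker takes CLIP pixel values plus the generated images as numpy arrays.
# checker = IFSafetyChecker.from_pretrained("DeepFloyd/IF-I-XL-v1.0", subfolder="safety_checker")
# images, nsfw_detected, watermark_detected = checker(clip_input, images)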
| 41
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 41
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=1_0,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.0_2,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
def lowercase ( self : Tuple ):
_UpperCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Optional[int] ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def lowercase ( self : Tuple , snake_case_ : int , snake_case_ : str , snake_case_ : str ):
_UpperCAmelCase = VideoMAEModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : Tuple ):
_UpperCAmelCase = VideoMAEForPreTraining(snake_case_ )
model.to(snake_case_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCAmelCase = torch.ones((self.num_masks,) )
_UpperCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_UpperCAmelCase = mask.expand(self.batch_size , -1 ).bool()
_UpperCAmelCase = model(snake_case_ , snake_case_ )
# model only returns predictions for masked patches
_UpperCAmelCase = mask.sum().item()
_UpperCAmelCase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowercase ( self : Dict ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_lowerCamelCase : Any = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_lowerCamelCase : List[Any] = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Any = False
_lowerCamelCase : str = False
_lowerCamelCase : str = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=3_7)
def lowercase ( self : Any , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any]=False ):
_UpperCAmelCase = copy.deepcopy(snake_case_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCAmelCase = torch.ones((self.model_tester.num_masks,) )
_UpperCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_UpperCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
_UpperCAmelCase = bool_masked_pos.to(snake_case_ )
if return_labels:
if model_class in [
*get_values(snake_case_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def lowercase ( self : Union[str, Any] ):
pass
def lowercase ( self : str ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
@slow
def lowercase ( self : List[Any] ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = VideoMAEModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ( self : Tuple ):
if not self.has_attentions:
pass
else:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_UpperCAmelCase = len(snake_case_ )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 1 , len(snake_case_ ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase ( self : Tuple ):
def check_hidden_states_output(snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict ):
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(snake_case_ ) , snake_case_ )
_UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase ( self : int ):
pass
def prepare_video():
    """Load a short test video (as a list of frames) from the Hub."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase ( self : List[str] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : Tuple ):
_UpperCAmelCase = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
snake_case_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# verify the logits
_UpperCAmelCase = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_UpperCAmelCase = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
@slow
def lowercase ( self : List[Any] ):
_UpperCAmelCase = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(snake_case_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
# add boolean mask, indicating which patches to mask
_UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
_UpperCAmelCase = torch.load(snake_case_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# verify the logits
_UpperCAmelCase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
_UpperCAmelCase = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=snake_case_ )
self.assertEqual(outputs.logits.shape , snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , snake_case_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_UpperCAmelCase = torch.tensor([0.5_1_4_2] , device=snake_case_ )
self.assertTrue(torch.allclose(outputs.loss , snake_case_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_UpperCAmelCase = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=snake_case_ ).to(
snake_case_ )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
        _UpperCAmelCase = torch.tensor([0.6_4_6_9] , device=snake_case_ )
self.assertTrue(torch.allclose(outputs.loss , snake_case_ , atol=1e-4 ) )
| 156
|
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort input_list in place (and return it) using odd-even transposition
    sort, a parallel-friendly variant of bubble sort."""
    is_sorted = False
    while is_sorted is False:  # repeat passes until a full pass makes no swap
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputting the elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
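# Worked example (illustrative): starting from [3, 1, 2], the even pass swaps
# indices 0/1 giving [1, 3, 2], the odd pass swaps indices 1/2 giving
# [1, 2, 3], and the next full pass makes no swaps, so the loop stops.
# Worst-case cost is O(n^2), like bubble sort.
# print(odd_even_sort([3, 1, 2]))  # [1, 2, 3]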
| 156
| 1
|
from math import ceil
def solution(n: int = 10_01) -> int:
    """Sum of the numbers on both diagonals of an n-by-n number spiral
    (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
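# Quick check against the example in the problem statement (assumed here):
# a 5x5 spiral has diagonals 1, 3, 5, 7, 9, 13, 17, 21, 25 summing to 101.
# assert solution(5) == 101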
| 310
|
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single Donut-style processor."""
lowercase = ['image_processor', 'tokenizer']
lowercase = 'AutoImageProcessor'
lowercase = 'AutoTokenizer'
def __init__( self : int , lowerCamelCase : List[str]=None , lowerCamelCase : Union[str, Any]=None , **lowerCamelCase : str ) -> Tuple:
lowerCAmelCase_ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCamelCase , )
lowerCAmelCase_ : Tuple = kwargs.pop("""feature_extractor""" )
lowerCAmelCase_ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[int] = self.image_processor
lowerCAmelCase_ : Any = False
def __call__( self : List[Any] , *lowerCamelCase : str , **lowerCamelCase : Tuple ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Any = kwargs.pop("""images""" , lowerCamelCase )
lowerCAmelCase_ : Dict = kwargs.pop("""text""" , lowerCamelCase )
if len(lowerCamelCase ) > 0:
lowerCAmelCase_ : str = args[0]
lowerCAmelCase_ : Dict = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
lowerCAmelCase_ : Any = self.image_processor(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
if text is not None:
lowerCAmelCase_ : str = self.tokenizer(lowerCamelCase , **lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase_ : Dict = encodings["""input_ids"""]
return inputs
def __lowercase ( self : str , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] ) -> List[str]:
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def __lowercase ( self : List[Any] , *lowerCamelCase : Any , **lowerCamelCase : Union[str, Any] ) -> List[str]:
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@contextmanager
def __lowercase ( self : str ) -> Union[str, Any]:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : Optional[Any] = self.tokenizer
yield
lowerCAmelCase_ : List[Any] = self.image_processor
lowerCAmelCase_ : List[str] = False
def __lowercase ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : str=False , lowerCamelCase : List[Any]=None ) -> Optional[int]:
if added_vocab is None:
lowerCAmelCase_ : str = self.tokenizer.get_added_vocab()
lowerCAmelCase_ : Union[str, Any] = {}
while tokens:
lowerCAmelCase_ : Dict = re.search(R"""<s_(.*?)>""" , lowerCamelCase , re.IGNORECASE )
if start_token is None:
break
lowerCAmelCase_ : Tuple = start_token.group(1 )
lowerCAmelCase_ : Tuple = re.search(RF'</s_{key}>' , lowerCamelCase , re.IGNORECASE )
lowerCAmelCase_ : Tuple = start_token.group()
if end_token is None:
lowerCAmelCase_ : str = tokens.replace(lowerCamelCase , """""" )
else:
lowerCAmelCase_ : List[str] = end_token.group()
lowerCAmelCase_ : Dict = re.escape(lowerCamelCase )
lowerCAmelCase_ : int = re.escape(lowerCamelCase )
lowerCAmelCase_ : Dict = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , lowerCamelCase , re.IGNORECASE )
if content is not None:
lowerCAmelCase_ : str = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCAmelCase_ : str = self.tokenajson(lowerCamelCase , is_inner_value=lowerCamelCase , added_vocab=lowerCamelCase )
if value:
if len(lowerCamelCase ) == 1:
lowerCAmelCase_ : List[Any] = value[0]
lowerCAmelCase_ : Optional[Any] = value
else: # leaf nodes
lowerCAmelCase_ : List[str] = []
for leaf in content.split(R"""<sep/>""" ):
lowerCAmelCase_ : Union[str, Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCAmelCase_ : Any = leaf[1:-2] # for categorical special tokens
output[key].append(lowerCamelCase )
if len(output[key] ) == 1:
lowerCAmelCase_ : Optional[Any] = output[key][0]
lowerCAmelCase_ : List[Any] = tokens[tokens.find(lowerCamelCase ) + len(lowerCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCamelCase , added_vocab=lowerCamelCase )
if len(lowerCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowercase ( self : Dict ) -> int:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCamelCase , )
return self.image_processor_class
@property
def __lowercase ( self : Dict ) -> Optional[Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCamelCase , )
return self.image_processor
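# Illustrative example of the recursive token-to-JSON parsing above (the tag
# names are hypothetical): a sequence such as
#     "<s_menu><s_name>Latte</s_name></s_menu>"
# parses to {"menu": {"name": "Latte"}}, while "<sep/>"-separated leaves are
# collected into lists.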
| 120
| 0
|
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs summarization model."""

    model_type = "bertabs"

    def __init__(self, vocab_size=30_522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
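# Minimal usage sketch (the override is illustrative):
# config = BertAbsConfig(dec_layers=8)
# print(config.model_type, config.dec_layers)  # bertabs 8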
| 118
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the Hugging Face DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_56 / 2_24) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
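
# A minimal sketch of driving the conversion from Python instead of the CLI
# (the dump folder below is a made-up example path, not part of this script):
#
#     convert_deit_checkpoint(
#         deit_name="vit_deit_tiny_patch16_224",
#         pytorch_dump_folder_path="./deit-tiny-patch16-224",
#     )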
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
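
# All of the full-loop tests above exercise the same denoising pattern. A
# self-contained sketch of that loop (the all-zeros "model" and the sample
# shape are assumptions of this example, not part of the test suite):
#
#     scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
#     scheduler.set_timesteps(10)
#     sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet
#         sample = scheduler.step(noise_pred, t, sample).prev_sample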
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between two nodes of a graph using BFS."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between two nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
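
# Both helpers run in O(V + E): each node is enqueued at most once and each
# edge is inspected at most once. A quick check against the demo graph above:
#
#     assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
#     assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4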
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
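
# A minimal sketch of instantiating the config above with its defaults (no
# released checkpoint is loaded here; the assert only restates the property):
#
#     config = UMT5Config()
#     assert config.hidden_size == config.d_model == 512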
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
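
# A small illustration of what the helper computes (the input lines are an
# arbitrary example): comments and blank lines are stripped before hashing,
# so the two calls below produce the same digest.
#
#     assert _hash_python_lines(["x = 1  # set x", ""]) == _hash_python_lines(["x = 1"])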
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCAmelCase :
pass
"""simple docstring"""
import sys
lowercase__ : Dict = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product, and return that product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
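
# A small worked check of the digit-product helper above (digits arbitrary):
#
#     assert str_eval("978") == 9 * 7 * 8 == 504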
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with flat features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
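
# With the lazy module in place, a plain import only materializes what is
# actually accessed, e.g. (a sketch; the model classes additionally need torch):
#
#     from transformers.models.mask2former import Mask2FormerConfig
#     config = Mask2FormerConfig()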
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import regex

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # merge the three sub-vocabularies into a single mapping
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Converts the artist, genre and lyrics tokens to their index using the
        vocabulary; genres are padded with -1 up to self.n_genres."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Converts the lyrics into a sequence of character tokens."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text: lower-cases it and maps every character
        outside [a-z0-9.] to an underscore, collapsing runs of underscores."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)

    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Convert the raw artist/genre/lyrics strings to lists of token ids, one per prior."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Convert indices back to their artist, genre and lyric-character tokens."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
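
# A minimal sketch of calling the tokenizer (the artist/genre/lyrics strings
# are made-up examples, and the three vocab files are assumed to exist):
#
#     tokenizer = JukeboxTokenizer("artists.json", "genres.json", "lyrics.json")
#     encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="Remember when")
#     # encoding["input_ids"] then holds one tensor per prior version (v3, v2, v2)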
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the embedded profile JSON from an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Class to scrape public profile information from Instagram."""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below limit (odd-only scan)."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):  # only need to check odd factors up to sqrt(limit)
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ceiling that can be written as the sum of the
    most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F"{solution() = }")
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph: every possible edge is added with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
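
# A quick illustrative call (seeded so the sketch is reproducible):
#
#     random.seed(1)
#     random_graph(4, 0.5)   # undirected adjacency dict; edges vary with the seed
#     complete_graph(3)      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}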
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
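
# A sketch of a typical command line for this script (task name, paths and
# hyper-parameters below are placeholder examples):
#
#     python run_multiple_choice.py \
#         --task_name swag \
#         --model_name_or_path bert-base-uncased \
#         --data_dir ./swag_data \
#         --output_dir ./swag_out \
#         --do_train --do_eval \
#         --max_seq_length 128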
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
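
# Once the guarded imports above succeed, the public pipeline can be used the
# usual way, e.g. (a sketch; "kakaobrain/karlo-v1-alpha" is the released
# unCLIP checkpoint on the Hub):
#
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a corgi wearing sunglasses").images[0]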
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # special case for ForPreTraining model: it needs both token-level and
    # sentence-order labels.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime.

    Uses the Lucas-Lehmer recurrence s_0 = 4, s_i = (s_{i-1}**2 - 2) mod (2**p - 1);
    for prime p > 2, 2**p - 1 is prime exactly when s_{p-2} == 0.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
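
# A hypothetical usage sketch (added for illustration, not part of the original
# module): collect the exponents p <= 31 whose Mersenne number 2**p - 1 is prime.
# For this range the result should be [2, 3, 5, 7, 13, 17, 19, 31].
def mersenne_exponents(limit: int) -> list:
    return [p for p in range(2, limit + 1) if lucas_lehmer_test(p)]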
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
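
# A minimal defensive variant (added sketch, not in the original file): same
# endpoint as get_hackernews_story above, but with a timeout and basic error
# handling so a flaky network call returns None instead of raising.
def get_hackernews_story_safe(story_id: str, timeout: float = 10.0) -> dict | None:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.RequestException:
        return None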
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
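
        # Note (added): Transformer-XL caches one memory tensor per layer with shape
        # (mem_len, batch_size, hidden_size); feeding `mems` back in lets the second
        # forward pass attend over the hidden states of the previous segment.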
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
"""
Sum of digits sequence (Project Euler problem 551, https://projecteuler.net/problem=551).

Let a(0), a(1), ... be an integer sequence defined by a(0) = 1 and, for n > 0,
a(n) = a(n - 1) + digitsum(a(n - 1)).  Find a(10**15).

The implementation caches "jumps" over blocks of terms: writing a term as
b * 10**k + c, the growth of the low k digits depends only on (digitsum(b), c),
so previously seen transitions can be replayed instead of recomputed.
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # ds_b - digitsum of the high digits b, c - value of the low k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds addend to the digit array starting at index k, carrying as needed
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #              ^ unk: 2 + 1 = 3         unk: 2 + 1 = 3                                     ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
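
    # Note (added): mBART encodes source text as `tokens + [eos, src_lang_code]`,
    # which is why the truncation test above expects id 2 (eos) second-to-last and
    # the en_XX language code (EN_CODE) in the final position.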
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
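
# Usage note (added): with the lazy structure above, importing the package is cheap;
# heavy torch/tf submodules only load on first attribute access, e.g.
#
#     from transformers.models.blip import BlipConfig  # triggers configuration_blip
#     config = BlipConfig()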
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
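
# Minimal usage sketch (added; assumes the class above is exported as TimesformerConfig):
# config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
# assert config.model_type == "timesformer"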
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Sync the backend pre-tokenizer's add_prefix_space setting with the argument.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
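
# Usage sketch (added, hedged): round-tripping text with the fast tokenizer above.
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# ids = tokenizer("Hello world").input_ids
# assert tokenizer.decode(ids) == "Hello world"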
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
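

if __name__ == "__main__":
    # Example (added): two islands in a 3x3 grid -- the connected 1s in the top-left
    # corner and the lone 1 in the bottom-right corner (8-directional adjacency).
    grid = [[1, 1, 0], [0, 0, 0], [0, 0, 1]]
    print(Graph(3, 3, grid).count_islands())  # expected: 2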
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
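
    # Note (added): PriorTransformer is the diffusion "prior" used in unCLIP/Kandinsky
    # pipelines; it denoises a CLIP image embedding (`hidden_states`) conditioned on a
    # projected text embedding and the text encoder states, hence the dummy inputs above.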
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        inputs = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**inputs)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        inputs = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**inputs)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
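
    # Note (added): ByT5 ids are raw UTF-8 bytes shifted by the 3 special tokens
    # (pad=0, eos=1, unk=2), e.g. "U" = byte 85 -> id 88 and " " = byte 32 -> id 35,
    # which is exactly the pattern in the expected id lists above.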
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
def a__ ( self :int ):
# safety check on max_len default value so we are sure the test works
snake_case_ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,4_2 )
# Now let's start the test
snake_case_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ : str = tempfile.mkdtemp()
snake_case_ : int = """ He is very happy, UNwant\u00E9d,running"""
snake_case_ : str = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
snake_case_ : List[str] = tokenizer.__class__.from_pretrained(_UpperCamelCase )
snake_case_ : Union[str, Any] = after_tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
shutil.rmtree(_UpperCamelCase )
snake_case_ : Tuple = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Dict = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
snake_case_ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
snake_case_ : str = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
snake_case_ : Optional[int] = tokenizer.__class__.from_pretrained(_UpperCamelCase )
snake_case_ : Optional[Any] = after_tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
self.assertIn("""new_additional_special_token""" ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,4_2 )
snake_case_ : List[str] = tokenizer.__class__.from_pretrained(_UpperCamelCase ,model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length ,4_3 )
shutil.rmtree(_UpperCamelCase )
def a__ ( self :Union[str, Any] ):
snake_case_ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase ,"""special_tokens_map.json""" ) ,encoding="""utf-8""" ) as json_file:
snake_case_ : str = json.load(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase ,"""tokenizer_config.json""" ) ,encoding="""utf-8""" ) as json_file:
snake_case_ : Any = json.load(_UpperCamelCase )
snake_case_ : Any = [F'''<extra_id_{i}>''' for i in range(1_2_5 )]
snake_case_ : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
snake_case_ : List[str] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(_UpperCamelCase ,"""special_tokens_map.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile:
json.dump(_UpperCamelCase ,_UpperCamelCase )
with open(os.path.join(_UpperCamelCase ,"""tokenizer_config.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile:
json.dump(_UpperCamelCase ,_UpperCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir ,)
self.assertIn(
"""an_additional_special_token""" ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" ,lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir ,additional_special_tokens=new_added_tokens ,)
self.assertIn("""a_new_additional_special_token""" ,tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) ,)
    def test_decode_single_bytes( self ):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                tokenizer = tokenizer_class.from_pretrained(tmp_dir )
self.assertTrue(tokenizer.decode([2_5_5] ) == """""" )
def a__ ( self :Tuple ):
pass
def a__ ( self :Any ):
pass
def a__ ( self :List[Any] ):
pass
def a__ ( self :str ):
pass
    def test_convert_tokens_to_string_format( self ):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True ,do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                tokens = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string ,str )
    def test_tokenizers_common_ids_setters( self ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                attributes_list = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters ,skip_special_tokens=False )
                for attr in attributes_list:
                    setattr(tokenizer ,attr + """_id""" ,None )
                    self.assertEqual(getattr(tokenizer ,attr ) ,None )
                    self.assertEqual(getattr(tokenizer ,attr + """_id""" ) ,None )
                    setattr(tokenizer ,attr + """_id""" ,token_id_to_test_setters )
                    self.assertEqual(getattr(tokenizer ,attr ) ,token_to_test_setters )
                    self.assertEqual(getattr(tokenizer ,attr + """_id""" ) ,token_id_to_test_setters )
                setattr(tokenizer ,"""additional_special_tokens_ids""" ,[] )
                self.assertListEqual(getattr(tokenizer ,"""additional_special_tokens""" ) ,[] )
                self.assertListEqual(getattr(tokenizer ,"""additional_special_tokens_ids""" ) ,[] )
                setattr(tokenizer ,"""additional_special_tokens_ids""" ,[token_id_to_test_setters] )
                self.assertListEqual(getattr(tokenizer ,"""additional_special_tokens""" ) ,[token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer ,"""additional_special_tokens_ids""" ) ,[token_id_to_test_setters] )
| 8
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowercase : Dict = StableDiffusionInpaintPipeline
lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase : Optional[int] = frozenset([] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=True ,)
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_inpaint( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self :Tuple ):
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a__ ( self :Tuple ):
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,)
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
snake_case_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a__ ( self :Union[str, Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" )
snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,)
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[int] = torch.manual_seed(0 )
snake_case_ : Tuple = pipe(
prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,)
snake_case_ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
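        # Note: enable_sequential_cpu_offload() streams submodules onto the GPU one at a time
        # during the forward pass, which is why peak VRAM stays under the 2.65 GB bound
        # asserted above even for fp16 SD2-inpainting weights.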
| 8
| 1
|
'''simple docstring'''
def price_plus_tax( price , tax_rate ):
    return price * (1 + tax_rate)
if __name__ == "__main__":
    print(F"""{price_plus_tax(100, 0.25) = }""")
    print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 83
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
snake_case_ : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. If passed, sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to the maximum sentence length. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
                """efficient on GPU but very bad for TPU."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self ,features ):
        '''simple docstring'''
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['input_ids'] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
        # Un-flatten
        batch = {k: v.view(batch_size ,num_choices ,-1 ) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels ,dtype=torch.int64 )
        return batch
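# A minimal usage sketch of the collator above (the feature dicts and shapes are illustrative,
# not taken from the SWAG pipeline below):
#   collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
#   batch = collator([{"input_ids": [[1, 2], [3, 4], [5, 6], [7, 8]], "label": 2}])
#   batch["input_ids"].shape -> (1, 4, 2); batch["labels"] -> tensor([2])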
def main( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4 )]
    context_name = 'sent1'
    question_header_name = 'sent2'
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
        max_seq_length = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
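    # Illustrative check of the regrouping above (hypothetical values, not SWAG data):
    # slicing list(range(8)) in steps of 4 gives [[0, 1, 2, 3], [4, 5, 6, 7]], i.e. the four
    # tokenized candidate endings are nested back under their originating example.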
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model() # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 83
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__SCREAMING_SNAKE_CASE ={
"yjernite/retribert-base-uncased": 512,
}
__SCREAMING_SNAKE_CASE ={
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = RetriBertTokenizer
lowercase = ['input_ids', 'attention_mask']
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
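    # Illustrative mask for the method above (hypothetical ids): with token_ids_0 = [5, 6]
    # and token_ids_1 = [7], the layout [CLS] 5 6 [SEP] 7 [SEP] yields [0, 0, 0, 0, 1, 1].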
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 363
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
            'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname ,IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
            json.dump(image_processor_map ,fp )
    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_image_processor( self ,**kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname ,**kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow ,image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=False )
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast ,image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer ,CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer ,CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor ,ViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor ,ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False ,padding_value=1.0 )
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=False ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,ViTImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input ,return_tensors='np' )
        input_processor = processor(images=image_input ,return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_visual_prompt( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input ,visual_prompt=visual_prompt_input )
        self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok ,decoded_processor )
| 321
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
a : Dict = None
a : Optional[int] = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
a : Union[str, Any] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size( n, ffn_dim_multiplier=1, multiple_of=256 ) -> int:
    '''simple docstring'''
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
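# Worked example for the sizing rule above (plain arithmetic, not new data): for n = 4096,
# the 7B hidden size, int(8 * 4096 / 3) = 10922, and rounding up to a multiple of 256
# gives 11008 -- the '7B' entry in the intermediate-size table at the top of this file.
assert compute_intermediate_size(4096 ) == 1_1008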
def read_json( path ):
    '''simple docstring'''
    with open(path, '''r''' ) as f:
        return json.load(f )
def write_json( text, path ):
    '''simple docstring'''
    with open(path, '''w''' ) as f:
        json.dump(text, f )
def write_model( model_path, input_base_path, model_size, safe_serialization=True ):
    '''simple docstring'''
    os.makedirs(model_path, exist_ok=True )
    tmp_model_path = os.path.join(model_path, '''tmp''' )
    os.makedirs(tmp_model_path, exist_ok=True )
    params = read_json(os.path.join(input_base_path, '''params.json''' ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 1_0_0_0_0.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads'''] # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else: # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim ):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2 ).transpose(1, 2 ).reshape(dim1, dim2 )
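    # What permute does: Meta's checkpoints store the rotary q/k projection rows with the
    # (real, imaginary) pairs interleaved per head, while the HF Llama attention splits the
    # head dimension into two halves; the view/transpose/reshape re-orders rows accordingly.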
print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
snake_case_ = torch.load(os.path.join(__UpperCAmelCase, '''consolidated.00.pth''' ), map_location='''cpu''' )
else:
# Sharded
snake_case_ = [
torch.load(os.path.join(__UpperCAmelCase, F"consolidated.{i:02d}.pth" ), map_location='''cpu''' )
for i in range(__UpperCAmelCase )
]
snake_case_ = 0
snake_case_ = {'''weight_map''': {}}
for layer_i in range(__UpperCAmelCase ):
snake_case_ = F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
snake_case_ = {
F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wq.weight"] ),
F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wk.weight"] ),
F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
snake_case_ = {
F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
F"layers.{layer_i}.attention_norm.weight"
].clone(),
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
F"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
snake_case_ = permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
for i in range(__UpperCAmelCase )
], dim=0, ).reshape(__UpperCAmelCase, __UpperCAmelCase ) )
snake_case_ = permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wk.weight"].view(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
for i in range(__UpperCAmelCase )
], dim=0, ).reshape(__UpperCAmelCase, __UpperCAmelCase ), __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, )
snake_case_ = torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wv.weight"].view(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
for i in range(__UpperCAmelCase )
], dim=0, ).reshape(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = torch.cat(
[loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(__UpperCAmelCase )], dim=1 )
snake_case_ = torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(__UpperCAmelCase )], dim=0 )
snake_case_ = torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(__UpperCAmelCase )], dim=1 )
snake_case_ = torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(__UpperCAmelCase )], dim=0 )
snake_case_ = inv_freq
for k, v in state_dict.items():
snake_case_ = filename
param_count += v.numel()
torch.save(__UpperCAmelCase, os.path.join(__UpperCAmelCase, __UpperCAmelCase ) )
snake_case_ = F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
snake_case_ = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
snake_case_ = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(__UpperCAmelCase )], dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(__UpperCAmelCase )], dim=0 ),
}
for k, v in state_dict.items():
snake_case_ = filename
param_count += v.numel()
torch.save(__UpperCAmelCase, os.path.join(__UpperCAmelCase, __UpperCAmelCase ) )
# Write configs
snake_case_ = {'''total_size''': param_count * 2}
write_json(__UpperCAmelCase, os.path.join(__UpperCAmelCase, '''pytorch_model.bin.index.json''' ) )
snake_case_ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
snake_case_ = params['''multiple_of'''] if '''multiple_of''' in params else 256
snake_case_ = LlamaConfig(
hidden_size=__UpperCAmelCase, intermediate_size=compute_intermediate_size(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), num_attention_heads=params['''n_heads'''], num_hidden_layers=params['''n_layers'''], rms_norm_eps=params['''norm_eps'''], num_key_value_heads=__UpperCAmelCase, )
config.save_pretrained(__UpperCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
snake_case_ = LlamaForCausalLM.from_pretrained(__UpperCAmelCase, torch_dtype=torch.floataa, low_cpu_mem_usage=__UpperCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(__UpperCAmelCase, safe_serialization=__UpperCAmelCase )
shutil.rmtree(__UpperCAmelCase )
def write_tokenizer( tokenizer_path, input_tokenizer_path ):
    '''simple docstring'''
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''', help='''Location of LLaMA weights, which contains tokenizer.model and model folders''', )
parser.add_argument(
'''--model_size''', choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''], )
parser.add_argument(
'''--output_dir''', help='''Location to write HF model and tokenizer''', )
parser.add_argument('''--safe_serialization''', type=__UpperCAmelCase, help='''Whether or not to save using `safetensors`.''' )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, '''tokenizer.model''' )
    write_tokenizer(args.output_dir, spm_path )
if __name__ == "__main__":
main()
| 56
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 107
| 0
|
"""simple docstring"""
from PIL import Image
def mean_threshold( image ):
    '''simple docstring'''
    height , width = image.size
    mean = 0
    pixels = image.load()
    for i in range(height ):
        for j in range(width ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 2_5_5 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
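# Minimal demo of the rule above on a synthetic 2x1 grayscale image (illustrative, not part
# of the original snippet): pixel values [60, 150] have integer mean 105, so thresholding
# maps them to [0, 255]:
#   demo = Image.new('L', (2, 1)); demo.putdata([60, 150])
#   list(mean_threshold(demo).getdata()) -> [0, 255]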
| 324
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object ):
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["""vocab_size"""] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=f"""`{prop}` does not exist""" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string( self ):
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )
    def create_and_test_config_to_json_file( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """config.json""" )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained_subfolder( self ):
        config_first = self.config_class(**self.inputs_dict )
        subfolder = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_configs_path )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_with_num_labels( self ):
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )
    def check_config_can_be_init_without_params( self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
    def check_config_arguments_init( self ):
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
            raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _lowercase ( self : int ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
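# Typical usage from a model's test file (the tester class name and config class here
# are illustrative):
#   tester = ConfigTester(self, config_class=MyModelConfig, hidden_size=37)
#   tester.run_common_tests()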
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
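# The MAPPING_* tables translate fairseq parameter names (keys) into HF SpeechT5 module
# paths (values); a literal "*" stands for the numeric layer index and is substituted at
# load time. The IGNORE_KEYS_* lists name fairseq tensors with no HF counterpart for a
# given task: a trailing ".*" ignores a whole prefix, and "a.*.b" ignores any key that
# contains both fragments.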
def set_recursively(hf_model, key, value, full_name, weight_type):
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
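# Example: a fairseq tensor mapped to "speecht5.encoder.wrapped_encoder.layer_norm" with
# weight_type "weight" is reached attribute-by-attribute from the top-level model, and
# `value` is copied into that LayerNorm's .weight.data after the shape check above.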
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
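# Illustrative checks of the wildcard rules above:
#   should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])  # True ("a.*.b" form)
#   should_ignore("text_encoder_prenet.embed", ["text_encoder_prenet.*"])                # True (prefix form)
#   should_ignore("decoder.version", ["encoder.proj"])                                   # False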
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
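# Example invocation (the script and file paths are placeholders; the flags are the
# ones defined below):
#   python convert_original_speecht5_checkpoint.py --task s2t \
#       --checkpoint_path ./speecht5_asr.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_asr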
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
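    # The dummy components above are deliberately tiny (12 codebook entries, 32 hidden
    # units) so the pipeline tests below run in seconds on CPU while still exercising
    # the real VQ-VAE -> transformer -> scheduler path.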
    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
    'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
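# config_common_kwargs deliberately assigns a non-default value to every common
# PretrainedConfig argument; the tests below check both that this list stays complete
# (against PretrainedConfig.__dict__) and that none of the values accidentally equals
# the default, which would make the round-trip check vacuous.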
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})
        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults)}.''' )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json')
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
from collections.abc import Sequence
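# Kadane's algorithm: a single left-to-right scan keeps the best sum of a subarray
# ending at the current element (curr_sum) and the best sum seen anywhere (max_sum);
# the window restarts whenever carrying the running sum forward could only hurt.
# O(n) time, O(1) extra space.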
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F"""{max_subarray_sum(nums) = }""")
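# For the demo array above the best contiguous subarray is [4, -1, 2, 1], so the
# script prints `max_subarray_sum(nums) = 6`.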
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained('''google/byt5-small''')

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(R'''^[ a-zA-Z]+$''', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ''' '''
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
        batch_without_eos_added = tokenizer(['''hi''', '''I went to the gym''', ''''''])
        self.assertListEqual(batch_with_eos_added['''input_ids'''], batch_without_eos_added['''input_ids'''])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '''Unicode €.</s>''')
        encoded = tokenizer('''e è é ê ë''')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '''e è é ê ë</s>''')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')), '''e è é ê ë</s>''')
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''', batch)
        self.assertIn('''attention_mask''', batch)
        self.assertNotIn('''decoder_input_ids''', batch)
        self.assertNotIn('''decoder_attention_mask''', batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='''max_length''', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['''input_ids'''].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['''A long paragraph for summarization. </s>''']
        tgt_text = ['''Summary of the text. </s>''']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch['''input_ids'''][0])
        self.assertEqual(expected_tgt_tokens, batch['''labels'''][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''')
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('''new_additional_special_token''', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, '''special_tokens_map.json'''), encoding='''utf-8''') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, '''tokenizer_config.json'''), encoding='''utf-8''') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f'''<extra_id_{i}>''' for i in range(125)]
                special_tokens_map['''additional_special_tokens'''] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                tokenizer_config['''additional_special_tokens'''] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(tmp_dir, '''special_tokens_map.json'''), '''w''', encoding='''utf-8''') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, '''tokenizer_config.json'''), '''w''', encoding='''utf-8''') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    '''an_additional_special_token''', tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['''an_additional_special_token'''],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn('''a_new_additional_special_token''', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['''a_new_additional_special_token'''],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == '''''')
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                tokens = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                attributes_list = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)
                for attr in attributes_list:
                    setattr(tokenizer, attr + '''_id''', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '''_id'''), None)
                    setattr(tokenizer, attr + '''_id''', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '''_id'''), token_id_to_test_setters)
                setattr(tokenizer, '''additional_special_tokens_ids''', [])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens'''), [])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens_ids'''), [])
                setattr(tokenizer, '''additional_special_tokens_ids''', [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens'''), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens_ids'''), [token_id_to_test_setters])
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
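# Standard transformers lazy-import layout: `_import_structure` maps each submodule to
# its public names, guarded by backend-availability checks, and at the bottom the module
# replaces itself in sys.modules with a _LazyModule so heavy backends (torch/tf/flax)
# are only imported when an attribute is actually accessed.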
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give more state to XLNet/Transformer-XL with short prompts (see __init__ below).
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length")
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
| 358
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 202
| 0
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 132
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
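# Note: `_LazyModule` defers the heavy (torch-dependent) imports above until an
# attribute such as `TimesformerModel` is first accessed, so `import transformers`
# stays cheap even when the video models are never used.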
| 321
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 68
|
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Gets a set of train, valid, and test dataloaders for a particular fold.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
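# Launch sketch (assumes `accelerate` has been configured on the machine, e.g. via
# `accelerate config`; the fold count and precision below are illustrative):
#
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16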
| 68
| 1
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """Parses CLI arguments for preparing the TFRecord shards."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument("--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.")
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument("--limit", default=None, type=int, help="Limit the number of shards (used for debugging).")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="tf-tpu",
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Returns a callable that tokenizes the `text` column with the given tokenizer."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serializes tokenized samples into `tf.train.Example` records."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        serialized_example = example.SerializeToString()
        records.append(serialized_example)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
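# Worked example of the `group_texts` chunking above: with max_length=512, a batch
# whose concatenated length is 1290 tokens keeps (1290 // 512) * 512 = 1024 tokens,
# i.e. two 512-token chunks, and the trailing 266 tokens are dropped.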
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 324
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 324
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
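# A minimal usage sketch (assumes `transformers` is installed; the values shown are
# simply the defaults restated):
#
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   print(config.hidden_size)  # 768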
| 364
|
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit."""
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
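# Worked example: apparent_power(100, 5, 0, 0) evaluates to (500+0j) volt-amperes,
# while apparent_power(100, 5, 90, 0) is approximately 500j -- a purely reactive load.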
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149
| 0
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns a list of all left- and right-truncations of n."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick filter: a large truncatable prime must have prime first and last three digits."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
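# Worked example (Project Euler #37): 3797 is truncatable because stripping digits
# from the left yields 797, 97, 7 and stripping from the right yields 379, 37, 3,
# and every intermediate value is itself prime.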
| 32
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
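# The deprecated alias keeps working but raises the FutureWarning above on
# instantiation, e.g. (a sketch; the checkpoint name is illustrative):
#
#   feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")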
| 162
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False,
        )
| 293
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."]
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case,  # the encoding dict assembled in the fmt:off block above
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        # MBartaaTokenizer is this file's import alias for the MBart-50 tokenizer
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
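

# --- Added illustration (not part of the original test file) ---
# A minimal, hedged sketch of how the pieces exercised above fit together at
# inference time. The checkpoint matches the tests; the model class, example
# sentence, and generation arguments are illustrative assumptions.
def _demo_en_to_ro_translation():
    from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

    name = "facebook/mbart-large-50-one-to-many-mmt"
    tokenizer = MBart50TokenizerFast.from_pretrained(name, src_lang="en_XX")
    model = MBartForConditionalGeneration.from_pretrained(name)
    inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # Forcing the first generated token to the target-language code is exactly
    # what the `forced_bos_token_id` assertion in test_tokenizer_translation checks.
    generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))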
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization: <|bagoftoken|> expands to repeated copies of the following token
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token, x_token_2)
        self.assertNotEqual(x_token, x_token_3)
        self.assertEqual(x_token_2[1], x_token[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
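

# --- Added illustration (not part of the original test file) ---
# A small, hedged sketch of the prefix-LM behaviour the slow tests above check:
# tokens from `prefix_text` get token_type_id 1 (the bidirectional prefix),
# tokens from the main text get 0. Requires the Tanrei/GPTSAN-japanese checkpoint.
def _demo_prefix_lm_token_types():
    tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    encoded = tokenizer("こんばんは", prefix_text="こんにちは")
    print(encoded.input_ids)
    print(encoded.token_type_ids)  # 1s over the prefix part, 0s over the text part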
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1_000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
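

# --- Added illustration (not part of the original test file) ---
# A minimal, hedged usage sketch of the processor under test: a single call
# routes `text` to the Roberta tokenizer and `audios` to the CLAP feature
# extractor. The checkpoint matches the tests; the random waveform is a stand-in.
def _demo_clap_processor():
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    waveforms = floats_list((1, 1_000))  # stand-in for real audio samples
    inputs = processor(text=["a dog barking"], audios=waveforms, return_tensors="np")
    print(sorted(inputs.keys()))  # tokenizer outputs plus feature-extractor outputs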
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reorder the four bytes of a 32-char bit string into little-endian order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Convert a non-negative integer to 8 hex chars, reordered little-endian."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Turn the message into a bit string padded to a multiple of 512 chars,
    ending with the original bit length as two little-endian 32-bit words."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the bit string into 512-char blocks and yield each block as a
    list of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 128-bit MD5 digest of the message as 32 hex chars (bytes)."""
    # Convert the message into a padded bit string
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    # Per-round left-rotation amounts (one row per group of 16 rounds)
    # fmt: off
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # fmt: on
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
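

# --- Added sanity check (not part of the original module) ---
# md5_me returns the hex digest as bytes, so it can be compared directly
# against hashlib's hexdigest() once that is encoded.
def check_against_hashlib(message: bytes = b"The quick brown fox jumps over the lazy dog") -> bool:
    import hashlib

    return md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")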
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.
    A 4-digit base n concatenated with 2n (5 digits) equals n * 100_002, and a
    3-digit base n concatenated with 2n and 3n equals n * 1_002_003."""
    for base_num in range(9999, 4999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'{solution() = }')
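

# --- Added cross-check (not part of the original solution) ---
# Independent brute force: for every base number, concatenate base*1, base*2, ...
# until at least 9 digits are reached, and keep the largest 9-digit pandigital
# result built from n >= 2 products. It agrees with the closed-form search above.
def brute_force_solution() -> int:
    best = 0
    for base in range(1, 10_000):
        concat = ""
        n = 0
        while len(concat) < 9:
            n += 1
            concat += str(base * n)
        if n >= 2 and len(concat) == 9 and is_9_pandigital(int(concat)):
            best = max(best, int(concat))
    return best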
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
lowerCAmelCase__ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor, one tab-separated row of values per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and importance scores (gradient of the
    loss w.r.t. the head mask), following Michel et al., http://arxiv.org/abs/1905.10650."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the score drops
    below `masking_threshold` times the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked head weights and measure the effect on score,
    parameter count, and speed (pruning is masking with the weights removed)."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
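

# --- Added illustration (not part of the original script) ---
# The `heads_to_prune` mapping built inside prune_heads() above is simply
# {layer_index: [head_indices_to_remove]}; a hand-written equivalent
# (the layer/head indices here are illustrative only):
def _demo_prune_heads_mapping(model):
    model.prune_heads({0: [2, 5], 3: [0]})  # drop heads 2 and 5 of layer 0, head 0 of layer 3
    return model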
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert"""
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
lowerCAmelCase__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
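

# --- Added usage sketch (not part of the original test file) ---
# What the tests above exercise, reduced to two lines: resolve a file from a Hub
# repo into the local cache and get back its on-disk path (needs network access).
def _demo_cached_file():
    resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
    print(resolved)  # .../models--hf-internal-testing--tiny-random-bert/snapshots/<sha>/config.json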
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline: assigns a label (and score) to an image
    using any image-classification checkpoint.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
def a ( SCREAMING_SNAKE_CASE_ : int = 5_0 ):
"""simple docstring"""
UpperCamelCase : List[str] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
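

# --- Added cross-check (not part of the original solution) ---
# Brute force for small rows: enumerate every 0/1 row and count those whose
# maximal runs of 1s (the "blocks") all have length >= 3. For length 7, both
# this and solution(7) give 17, the value quoted in Project Euler 114.
def brute_force_solution(length: int) -> int:
    from itertools import product

    count = 0
    for row in product((0, 1), repeat=length):
        runs = "".join(map(str, row)).split("0")
        if all(len(run) == 0 or len(run) >= 3 for run in runs):
            count += 1
    return count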
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place with odd-even transposition (brick) sort: alternate
    compare-and-swap passes over odd and even adjacent pairs."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
if __name__ == "__main__":
lowercase : Optional[int] = list(range(1_0, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
UpperCamelCase__: str = F"{src_lang}-{tgt_lang}"
UpperCamelCase__: Optional[Any] = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(UpperCamelCase__)  # the README template assembled above (variable name kept as-is)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss')(test_case)
    return test_case
def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex')(test_case)
    return test_case
def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case
def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case
def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case
def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case
def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case
def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case
def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case
def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case
def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case
def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate
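# Usage sketch (illustration only, not part of the original module): stack several
# requirement decorators onto every `test_*` method of a class at once, e.g.
#   @for_all_test_methods(require_faiss, require_regex)
#   class IndexTests(unittest.TestCase): ...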
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request
    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', F'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
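# Usage sketch (illustration only): assert that code under test fails fast when the
# network is unreachable, e.g.
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
#       some_function_that_downloads()  # any requests call now raises quickly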
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''')
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'''\'{cmd_str}\' produced no output.''')
    return result
def pytest_xdist_worker_id():
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(R'^gw', '', worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
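# Worked example (assuming pytest-xdist worker 'gw3'): pytest_xdist_worker_id()
# strips the 'gw' prefix and returns 3, so get_torch_dist_unique_port() yields
# 29500 + 3 = 29503, giving each xdist worker its own torch.distributed port.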
| 352
|
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy',
    computed with Zeller's congruence."""
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = F'''Your date {date_input}, is a {days[str(f)]}!'''
    return response
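# Example (illustration only): 1 January 2000 fell on a Saturday, so
# zeller('01-01-2000') returns "Your date 01-01-2000, is a Saturday!".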
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
| 327
| 0
|
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder for the given inputs (2 encodes a
    Hadamard superposition entry)."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')
    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
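# With classical inputs the measurement is deterministic: 1 + 1 + 1 = 3 = 0b11,
# so quantum_full_adder(1, 1, 1) is expected to return {'11': 1000} (carry bit
# followed by sum bit, over 1000 shots).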
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 293
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """One worker in the odd-even transposition sort: repeatedly swap with neighbors."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # (10 rounds here, matching the demo list of length 10 in main())
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` with one process per element, exchanging values with neighbors."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
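# Usage sketch (illustration only; must run under the __main__ guard because
# multiprocessing spawns child processes): odd_even_transposition([3, 1, 2])
# is expected to return [1, 2, 3].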
def main() -> None:
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
| 293
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_xlm_roberta_xl': [
        'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XLMRobertaXLConfig',
        'XLMRobertaXLOnnxConfig',
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
        'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMRobertaXLForCausalLM',
        'XLMRobertaXLForMaskedLM',
        'XLMRobertaXLForMultipleChoice',
        'XLMRobertaXLForQuestionAnswering',
        'XLMRobertaXLForSequenceClassification',
        'XLMRobertaXLForTokenClassification',
        'XLMRobertaXLModel',
        'XLMRobertaXLPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 262
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 262
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained('RUCAIBox/mvp')
    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp')
    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            # check if input_ids are returned and no labels
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt')
            self.assertEqual(32, targets['input_ids'].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = inputs['labels']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']), sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
| 141
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    ' was passed. Please provide a local path instead.')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs, ) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                    written += file_obj.write(json_str)
        return written
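# Usage sketch (illustration only, assuming a `datasets.Dataset` named `ds`):
#   JsonDatasetWriter(ds, 'out.jsonl').write()
# writes the dataset as JSON Lines; this is roughly what `Dataset.to_json` does
# under the hood.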
| 181
| 0
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, """__init__.py"""), """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("""else:"""):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """))
                elif line.startswith(""" """ * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object of the given name and backend."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = """[""" + """, """.join(f"\"{b}\"" for b in backend.split("""_and_""")) + """]"""
        dummy_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check that the dummy files are up to date, optionally overwriting them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"""torch""": """pt"""}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, """utils""")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, """r""", encoding="""utf-8""", newline="""\n""") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = """"""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    """__init__ has new objects.""")
                with open(dummy_file_paths[backend], """w""", encoding="""utf-8""", newline="""\n""") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    """to fix this.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
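# For illustration (hypothetical object name): create_dummy_object('UNet2DModel', '["torch"]')
# renders DUMMY_CLASS into a stub whose __init__/from_config/from_pretrained all call
# requires_backends(..., ["torch"]) and therefore fail cleanly when torch is missing.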
| 350
|
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image around its mean pixel value."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
| 243
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_vision_encoder_decoder"""] = ["""VisionEncoderDecoderModel"""]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_vision_encoder_decoder"""] = ["""TFVisionEncoderDecoderModel"""]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_vision_encoder_decoder"""] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 13
|
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""Cross-attention 2D downsampling block."""
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsampling block (resnets only)."""
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""Cross-attention 2D upsampling block."""
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block (resnets only)."""
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Cross-attention 2D mid block."""
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
| 315
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 364
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int) -> None:
    '''Seed python, numpy and torch RNGs for reproducible training.'''
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    '''
    Exponential Moving Average of model weights.
    '''
    def __init__(self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0, power: Union[float, int] = 2 / 3, model_cls: Optional[Any] = None, model_config: Dict[str, Any] = None, **kwargs, ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage`', '1.0.0', deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get('max_value', None) is not None:
            deprecation_message = 'The `max_value` argument is deprecated. Please use `decay` instead.'
            deprecate('max_value', '1.0.0', deprecation_message, standard_warn=False)
            decay = kwargs['max_value']
        if kwargs.get('min_value', None) is not None:
            deprecation_message = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
            deprecate('min_value', '1.0.0', deprecation_message, standard_warn=False)
            min_decay = kwargs['min_value']
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get('device', None) is not None:
            deprecation_message = 'The `device` argument is deprecated. Please use `to` instead.'
            deprecate('device', '1.0.0', deprecation_message, standard_warn=False)
            self.to(device=kwargs['device'])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls):
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.')
        if self.model_config is None:
            raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.')
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop('shadow_params', None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`', '1.0.0', deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`')
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get('decay', self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')
        self.min_decay = state_dict.get('min_decay', self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError('Invalid min_decay')
        self.optimization_step = state_dict.get('optimization_step', self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError('Invalid optimization_step')
        self.update_after_step = state_dict.get('update_after_step', self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError('Invalid update_after_step')
        self.use_ema_warmup = state_dict.get('use_ema_warmup', self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError('Invalid use_ema_warmup')
        self.inv_gamma = state_dict.get('inv_gamma', self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError('Invalid inv_gamma')
        self.power = state_dict.get('power', self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError('Invalid power')
        shadow_params = state_dict.get('shadow_params', None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError('shadow_params must be a list')
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError('shadow_params must all be Tensors')
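# Usage sketch (illustration only, assuming a diffusers model `unet` in a training loop):
#   ema = EMAModel(unet.parameters(), decay=0.9999)
#   for batch in dataloader:
#       ...  # forward/backward/optimizer.step()
#       ema.step(unet.parameters())
#   ema.copy_to(unet.parameters())  # load the averaged weights for evaluation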
| 271
| 0
|
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case):
    """Decorator marking a test that requires faiss."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss')(test_case)
    return test_case


def require_regex(test_case):
    """Decorator marking a test that requires regex."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex')(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Decorator marking a test that requires elasticsearch."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Decorator marking a test that requires sqlalchemy."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case


def require_torch(test_case):
    """Decorator marking a test that requires PyTorch."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case


def require_tf(test_case):
    """Decorator marking a test that requires TensorFlow."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case


def require_jax(test_case):
    """Decorator marking a test that requires JAX."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case


def require_pil(test_case):
    """Decorator marking a test that requires Pillow."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case


def require_transformers(test_case):
    """Decorator marking a test that requires transformers."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Decorator marking a test that requires tiktoken."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Decorator marking a test that requires spacy."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator marking a test that requires a specific spacy model."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Decorator marking a test that requires pyspark."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Decorator marking a test that requires joblibspark."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case


def slow(test_case):
    """Decorator marking a test as slow; skipped unless slow tests are enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def local(test_case):
    """Decorator marking a test as local; skipped unless local tests are enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def packaged(test_case):
    """Decorator marking a test as packaged; skipped unless packaged tests are enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def remote(test_case):
    """Decorator marking a test that requires remote access; skipped unless remote tests are enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Apply the given decorators to every test method of a class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
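# Illustrative usage of the decorators above (not part of the original file):
# `require_faiss` skips a whole class when faiss is missing, and
# `for_all_test_methods` applies `slow` to every test method at once.
@require_faiss
class _ExampleFaissTest(unittest.TestCase):
    def test_noop(self):
        self.assertTrue(True)


@for_all_test_methods(slow)
class _ExampleSlowTests(unittest.TestCase):
    def test_noop(self):
        self.assertTrue(True)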
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(enum.Enum):  # assumes `import enum` near the top of this module
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context."""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.'
            )
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'OfflineMock[{url}]'),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
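# Illustrative usage of `offline` (not part of the original file): any HTTP call
# made while the simulation is active should fail fast with a ConnectionError.
def _example_offline_usage():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get('https://huggingface.co')
        except requests.ConnectionError:
            pass  # expected: the patched Session.send refuses the request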
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
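# Illustrative usage (not part of the original file): allocating an Arrow table
# inside the first block satisfies the assertion; the second block must not
# allocate any Arrow memory.
def _example_arrow_memory_checks():
    with assert_arrow_memory_increases():
        table = pa.table({'col': list(range(1000))})
    del table
    with assert_arrow_memory_doesnt_increase():
        pass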
def is_rng_equal(rng1, rng2):
    """Check whether two numpy bit generators produce the same integer stream."""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=''):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f'The combined stderr from workers follows:\n{stderr}'
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker ('gw0' -> 0), 0 when not run under xdist."""
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port unique to this xdist worker, so parallel test runs don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
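# Illustrative usage (not part of the original file): run a short Python
# one-liner and inspect the captured stdout lines; the port helper offsets
# 29500 by the pytest-xdist worker id.
def _example_subprocess_usage():
    result = execute_subprocess_async([sys.executable, '-c', 'print("hello")'], quiet=True)
    assert result.stdout[0] == 'hello'
    assert get_torch_dist_unique_port() >= 29500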
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    # each `times` entry records the runtime of one map/filter configuration
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        times['map identity'] = map(dataset)
        times['map identity batched'] = map(dataset, batched=True)
        times['map no-op batched'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='numpy'):
            times['map no-op batched numpy'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            times['map no-op batched pandas'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            times['map no-op batched pytorch'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            times['map no-op batched tensorflow'] = map(dataset, function=lambda x: None, batched=True)
        times['map fast-tokenizer batched'] = map(dataset, function=tokenize, batched=True)
        times['filter'] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, 'wb') as f:
            f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
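# Usage sketch (illustrative, not from the original file): with no backend set,
# `parallel_map` splits the iterable into contiguous slices and fans them out
# over a multiprocessing Pool. `_demo_worker` below is a hypothetical stand-in
# for the real `single_map_nested_func` helper that `datasets` passes in; under
# the default backend it receives one (function, slice, types, index,
# disable_tqdm, desc) tuple per process and returns a list of results.
#
#     def _demo_worker(args):
#         function, data, types, index, disable_tqdm, desc = args
#         return [function(x) for x in data]
#
#     parallel_map(str.upper, ['a', 'b', 'c'], 2, (), True, None, _demo_worker)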
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Compute the rank of a matrix by Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
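    # Quick sanity check (illustrative, not in the original): the last row is
    # the sum of the first two, so the matrix has rank 2.
    assert rank_of_matrix([[1.0, 2, 3], [4, 5, 6], [5, 7, 9]]) == 2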
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series: n/2 * (2a + (n - 1)d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
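    # Quick sanity check (illustrative, not in the original): 1 + 2 + ... + 10.
    assert sum_of_series(1, 1, 10) == 55.0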
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo id that hosts the original SAM checkpoints.",
    )
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
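# Example invocation (illustrative; the script and output paths are placeholders):
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base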
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    """Pick a single annotated answer for a natural_questions example."""

    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Build the cleaned (html-free) context and remap the answer span into it."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question + context and split long documents into overlapping strides."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the no-answer ("null") samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__A : List[str] = load_dataset("natural_questions")
__A : Any = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
__A : int = data["train" if PROCESS_TRAIN == "true" else "validation"]
__A : str = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
__A : Dict = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__A : List[str] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
__A : Optional[int] = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
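# Example invocation (illustrative; the script name is a placeholder):
#   PROCESS_TRAIN=true python prepare_natural_questions.py   # writes nq-training.jsonl
#   python prepare_natural_questions.py                      # writes nq-validation.jsonl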
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Log a 2D tensor, one layer per line."""
    logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info('Head importance scores')
        print_2d_tensor(head_importance)
    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info('Final head mask')
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and measure the speed/score impact."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir',
        default=None,
        type=str,
        required=True,
        help='The input data dir. Should contain the .tsv files (or other data files) for the task.',
    )
    parser.add_argument(
        '--model_name_or_path',
        default=None,
        type=str,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models',
    )
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    # Other parameters
    parser.add_argument(
        '--config_name',
        default='',
        type=str,
        help='Pretrained config name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--tokenizer_name',
        default='',
        type=str,
        help='Pretrained tokenizer name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--cache_dir',
        default=None,
        type=str,
        help='Where do you want to store the pre-trained models downloaded from s3',
    )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.'
    )
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory'
    )
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
    )
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        '--dont_normalize_global_importance',
        action='store_true',
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.'
    )
    parser.add_argument(
        '--masking_threshold',
        default=0.9,
        type=float,
        help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).',
    )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Amount to heads to masking at each masking step.'
    )
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length',
        default=128,
        type=int,
        help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ),
    )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
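# Example run (illustrative; the script name and paths are placeholders):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir data/tokens.txt \
#       --output_dir output/ --try_masking --masking_threshold 0.9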
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Nezha model."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self, vocab_size=21_128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3,
        use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
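if __name__ == "__main__":
    # Illustrative check (not part of the original file): default hyper-parameters.
    config = NezhaConfig()
    assert config.model_type == "nezha" and config.hidden_size == 768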
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
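# A minimal sketch (illustrative) of the k-fold mechanics used below:
# StratifiedKFold yields (train_idxs, valid_idxs) index arrays per fold, which
# `get_fold_dataloaders` then uses to `select` rows from the dataset.
#
#     skf = StratifiedKFold(n_splits=3)
#     for train_idxs, valid_idxs in skf.split(np.zeros(len(labels)), labels):
#         ...  # build dataloaders for this fold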
__lowerCAmelCase = 1_6
__lowerCAmelCase = 3_2
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Build the train, validation, and test dataloaders for one cross-validation fold.
    `train_idxs` / `valid_idxs` carve up the original "train" split; the original
    "validation" split is held out as this fold's test set.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
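
# Note on the fold plumbing (descriptive only): `train_idxs` and `valid_idxs` are the
# integer index arrays produced by StratifiedKFold.split(), and Dataset.select() consumes
# them directly, so each fold re-partitions MRPC's original training split into its own
# train/validation subsets while the official validation split serves as the test set.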
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test references once
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
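
# Example launch (a hypothetical invocation; the script filename is an assumption here, and
# the `accelerate` CLI is documented in the examples readme linked in the header comment):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16
# Each fold trains a fresh model; the per-fold test logits are summed across folds,
# divided by `num_folds`, and argmax'd to produce the ensembled predictions scored above.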
| 271
| 0
|
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single story's JSON record from the Hacker News API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Format the top stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
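
# Illustrative output shape (titles and urls come from the live API, so values will differ):
#   * [Example story title](https://example.com/article)
#   * [Another story](https://example.com/other)
# One markdown bullet per story, built by hackernews_top_stories_as_markdown().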
| 371
|
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL image object. Every pixel above the image's mean
    intensity becomes 255 (white); every other pixel becomes 0 (black).
    """
    width, height = image.size
    mean = 0
    pixels = image.load()
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
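
# Minimal in-memory check (assumes only Pillow; values chosen for illustration):
#   from PIL import Image
#   img = Image.new("L", (2, 2))
#   img.putdata([10, 20, 200, 250])
#   print(list(mean_threshold(img).getdata()))  # mean is 120 -> [0, 0, 255, 255]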
| 184
| 0
|
import os
def solution():
    """Return the greatest product of four adjacent numbers (horizontally,
    vertically, or diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
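
# Worked micro-example of one scan: for a row [2, 3, 4, 5], the single horizontal window
# of four gives 2 * 3 * 4 * 5 = 120; solution() takes the max of such products over all
# rows, columns, and both diagonal directions of the 20x20 grid.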
| 232
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)

    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
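
# For example, the "wmt19-ru-en" iteration writes model_cards/facebook/wmt19-ru-en/README.md
# (relative to the repo root), with the ru-en BLEU scores from `scores` filled into the table.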
| 232
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[Any] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Tuple = Path(__lowercase ) / """preprocessor_config.json"""
a__ : List[Any] = Path(__lowercase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
a__ : str = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Tuple = Path(__lowercase ) / """preprocessor_config.json"""
a__ : int = Path(__lowercase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
a__ : Tuple = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[str] = CLIPConfig()
# Create a dummy config file with image_processor_type
a__ : Union[str, Any] = Path(__lowercase ) / """preprocessor_config.json"""
a__ : Dict = Path(__lowercase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : str = AutoImageProcessor.from_pretrained(__lowercase ).to_dict()
config_dict.pop("""image_processor_type""" )
a__ : Optional[int] = CLIPImageProcessor(**__lowercase )
# save in new folder
model_config.save_pretrained(__lowercase )
config.save_pretrained(__lowercase )
a__ : Optional[Any] = AutoImageProcessor.from_pretrained(__lowercase )
# make sure private variable is not incorrectly saved
a__ : List[Any] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Tuple = Path(__lowercase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
a__ : Tuple = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , """clip-base is not a local folder and is not a valid model identifier""" ):
a__ : Tuple = AutoImageProcessor.from_pretrained("""clip-base""" )
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
a__ : Optional[Any] = AutoImageProcessor.from_pretrained(__lowercase , revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
a__ : List[str] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
with self.assertRaises(__lowercase ):
a__ : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
a__ : Dict = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
a__ : List[str] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoImageProcessor.register(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Any = Path(__lowercase ) / """preprocessor_config.json"""
a__ : int = Path(__lowercase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
a__ : Union[str, Any] = CustomImageProcessor.from_pretrained(__lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
a__ : Tuple = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register("""custom""" , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local
a__ : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ : List[str] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__lowercase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 266
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 266
| 1
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """Forward images to the image processor and/or text to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence like <s_key>value</s_key> into a nested dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
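
    # Illustrative round-trip (hypothetical tags, assuming they exist in the tokenizer vocab):
    #   token2json("<s_menu><s_name>Latte</s_name></s_menu>")
    #   -> {"menu": {"name": "Latte"}}
    # Sibling leaf values separated by <sep/> become a list under the same key.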
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 6
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase__ ( unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : str = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Dict = CLIPConfig()
# Create a dummy config file with image_processor_type
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : List[str] = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE : str = CLIPImageProcessor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[int] = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''clip-base''' )
def __A ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(UpperCamelCase__ , revision='''aaaaaa''' )
def __A ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __A ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Any = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __A ( self : Any ):
'''simple docstring'''
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCamelCase__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 182
| 0
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer, honoring subtractive
    notation such as IV (4) and CM (900)."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
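
# e.g. parse_roman_numerals("XIV"): X (10) >= I so add 10; I (1) < V (5) so subtract 1;
# the final symbol V (5) is always added, giving 10 - 1 + 5 = 14.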
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal (canonical) Roman numeral string for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
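
# Quick sanity check of the two helpers above (a minimal illustration, not Project Euler data):
# "XIIIIII" is a valid but wasteful numeral for 16; its minimal form "XVI" saves 4 characters.
assert parse_roman_numerals("XIIIIII") == 16
assert generate_roman_numerals(16) == "XVI"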
def A_ ( _UpperCAmelCase = "/p089_roman.txt" ):
SCREAMING_SNAKE_CASE_: int = 0
with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea:
SCREAMING_SNAKE_CASE_: Dict = filea.readlines()
for line in lines:
SCREAMING_SNAKE_CASE_: Optional[int] = line.strip()
SCREAMING_SNAKE_CASE_: Tuple = parse_roman_numerals(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = generate_roman_numerals(_UpperCAmelCase )
savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase )
return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
| 127
|
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
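
# Rough reading of the loop above (descriptive, based on how value-guided diffusion
# planners generally work, not on code in this file): the pipeline samples
# `horizon`-length trajectories, applies `n_guide_steps` value-gradient nudges per
# denoising step, and the first denormalized action of the plan is executed in the env.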
| 127
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a distribution along the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
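
# Worked example: a uniform attention row p = [0.25, 0.25, 0.25, 0.25] has entropy
# -sum(p * ln p) = ln 4 ~= 1.386, the maximum over 4 tokens; sharply peaked rows score
# lower, which is why low-entropy heads are the more "decisive" ones.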
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute the attention entropy and the importance score of each head."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If the heads were actually pruned, set the mask to None to avoid a shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
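
    # E.g. with 2 heads scoring [3.0, 4.0] in one layer, the layer-wise step divides by
    # the L2 norm 5.0 -> [0.6, 0.8]; the global step then rescales all layers into [0, 1].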
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold,
    based on importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the masked heads and compare score and inference speed,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 335
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
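# The tests above all exercise one round-trip contract: whatever a processor
# serializes with save_pretrained() must come back identical via from_pretrained().
# Below is a minimal self-contained sketch of that contract with a toy class
# (the ToyVocab name and vocab.json layout are illustrative, not transformers API):
import json
import os
import tempfile


class ToyVocab:
    def __init__(self, vocab):
        self.vocab = vocab

    def save_pretrained(self, directory):
        # Persist everything needed to rebuild the object.
        os.makedirs(directory, exist_ok=True)
        with open(os.path.join(directory, "vocab.json"), "w", encoding="utf-8") as fp:
            json.dump(self.vocab, fp)

    @classmethod
    def from_pretrained(cls, directory):
        # Rebuild the object from the serialized state.
        with open(os.path.join(directory, "vocab.json"), encoding="utf-8") as fp:
            return cls(json.load(fp))


with tempfile.TemporaryDirectory() as tmpdir:
    original = ToyVocab({"[GO]": 0, "[s]": 1, "a": 2})
    original.save_pretrained(tmpdir)
    reloaded = ToyVocab.from_pretrained(tmpdir)
    assert reloaded.vocab == original.vocab  # the equality the tests assert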
| 335
| 1
|
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head for transformer encoders: a single linear projection."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
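# A quick usage sketch of the head above: project a batch of pooled hidden
# states into class logits. The 768/5 dimensions are arbitrary example values.
import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden = torch.randn(4, 768)  # batch of 4 pooled hidden states
logits = head(hidden)  # forward() is a single nn.Linear projection
print(logits.shape)  # torch.Size([4, 5])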
| 325
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
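# _LazyModule defers the heavy torch/model imports until an attribute is
# actually accessed. A minimal sketch of the same mechanism using PEP 562
# module-level __getattr__ (an illustration, not transformers' implementation):
import importlib

# Maps a public name to the submodule (relative to this package) defining it.
_lazy_attributes = {"SwiftFormerConfig": ".configuration_swiftformer"}


def __getattr__(name):
    # Invoked only when `name` is missing from this module's globals, so the
    # submodule is imported on first access instead of at package import time.
    if name in _lazy_attributes:
        module = importlib.import_module(_lazy_attributes[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")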
| 325
| 1
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."""
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
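# Most branches above reduce to one recurring move: TensorFlow stores a dense
# kernel as [in_features, out_features], while torch.nn.Linear.weight expects
# [out_features, in_features] -- hence the repeated transpose([1, 0]).copy().
# A small standalone check of that convention:
import numpy as np
import torch

tf_kernel = np.random.rand(16, 8).astype(np.float32)  # TF layout: [in, out]
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())  # PT: [out, in]

x = np.random.rand(4, 16).astype(np.float32)
tf_out = x @ tf_kernel  # TF-style matmul
pt_out = torch.tensor(x) @ pt_weight.T  # nn.Linear computes x @ W.T
assert np.allclose(tf_out, pt_out.numpy(), atol=1e-5)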
| 105
|
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record even-sized subtrees as cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root node to collect all removable edges."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
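# The script above relies on module-level tree/visited/cuts state. An
# equivalent self-contained variant (names here are my own) that threads the
# state through explicitly gives the same answer for the sample tree:
from collections import defaultdict


def count_removable_edges(edges: list[tuple[int, int]], root: int = 1) -> int:
    adj = defaultdict(list)
    for u, v in edges:
        adj[u].append(v)
        adj[v].append(u)
    even_subtrees = 0

    def subtree_size(node: int, parent: int) -> int:
        nonlocal even_subtrees
        size = 1
        for child in adj[node]:
            if child != parent:
                size += subtree_size(child, node)
        if size % 2 == 0:
            even_subtrees += 1
        return size

    subtree_size(root, 0)
    # The whole tree is itself an even component, so it does not count as a cut.
    return even_subtrees - 1


sample = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
print(count_removable_edges(sample))  # 2, matching the script above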
| 184
| 0
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
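# The function above encodes the mass action law n * p = n_i**2: any two of
# the three concentrations fix the third. A worked check with illustrative
# numbers (not real material constants), calling the function above:
assert carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10) == (
    "hole_conc",
    4.0,  # n_i**2 / n = 100 / 25
)
assert carrier_concentration(electron_conc=25, hole_conc=4, intrinsic_conc=0) == (
    "intrinsic_conc",
    10.0,  # sqrt(n * p) = sqrt(100)
)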
| 321
|
"""simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word (letters and spaces only) into Baconian A/B groups."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian message consisting of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
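# The table above is a hand-written variant (note the scrambled codes for 'j'
# and 'v'). The classic Baconian cipher is simply each letter's 5-bit binary
# index rendered as A/B, so it can be generated instead of tabulated -- a
# self-contained sketch that matches the table for most letters:
def bacon_encode_letter(letter: str) -> str:
    index = ord(letter.lower()) - ord("a")
    return "".join("AB"[bit == "1"] for bit in format(index, "05b"))


assert bacon_encode_letter("a") == "AAAAA"
assert bacon_encode_letter("d") == "AAABB"
print(" ".join(bacon_encode_letter(c) for c in "hello"))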
| 321
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 266
|
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    """Divide each row by its leading term, then subtract the first row from
    the others to eliminate the first column, recursing on the reduced system."""
    # Divide each row by magnitude of first term --> creating 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n rows of n+1 numbers
    (the coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('''solve_simultaneous() requires lists of integers''')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
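# Since the system is just Ax = b with b stored as the last column of each
# row, the result can be cross-checked against numpy's solver (assuming
# numpy is available):
import numpy as np

coefficients = np.array([row[:-1] for row in eq], dtype=float)
constants = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(coefficients, constants))  # should match solve_simultaneous(eq)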
| 266
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
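# One more note on the TYPE_CHECKING branch used by these init files: those
# imports run only under static analyzers (typing.TYPE_CHECKING is False at
# runtime), so annotations can name the heavy classes without importing them.
# A minimal illustration:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by type checkers only; never executed at runtime.
    from transformers import FunnelModel


def describe(model: "FunnelModel") -> str:
    # The quoted annotation is just a string at runtime, so no import of
    # FunnelModel is needed to call this function.
    return model.__class__.__name__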
| 362
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    __lowercase : List[Any] = KandinskyV22ControlnetImg2ImgPipeline
__lowercase : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowercase : Any = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowercase : Union[str, Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowercase : Optional[int] = False
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        UpperCamelCase = UNet2DConditionModel(**A_ )
return model
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.dummy_unet
UpperCamelCase = self.dummy_movq
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
UpperCamelCase = DDIMScheduler(**A_ )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCamelCase ( self , A_ , A_=0 ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase = Image.fromarray(np.uint8(A_ ) ).convert('RGB' ).resize((256, 256) )
# create hint
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 'cpu'
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**A_ )
UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase = output.images
UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCamelCase = init_image.resize((512, 512) )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
UpperCamelCase = torch.from_numpy(np.array(A_ ) ).float() / 255.0
UpperCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCamelCase = 'A robot, 4k photo'
        UpperCamelCase = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(A_ )
        UpperCamelCase = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase , UpperCamelCase = pipe_prior(
A_ , image=A_ , strength=0.85 , generator=A_ , negative_prompt='' , ).to_tuple()
UpperCamelCase = pipeline(
image=A_ , image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A_ , A_ )
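# Both test classes above depend on deterministic sampling: a seeded
# torch.Generator makes pipeline outputs reproducible, which is what allows
# comparing pixel slices against stored reference values. The pattern alone:
import torch


def sample(seed: int) -> torch.Tensor:
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(1, 3, 8, 8, generator=generator)


# Same seed -> identical tensors; different seed -> different tensors.
assert torch.equal(sample(0), sample(0))
assert not torch.equal(sample(0), sample(1))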
| 110
| 0
|