| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
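A minimal sketch of how rows with this schema are typically consumed with the `datasets` library; the dataset id `user/code-style-pairs` is a placeholder, not the real identifier:

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical id -- substitute the real dataset name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["label"], row["code_codestyle"], row["style_context_codestyle"])
print(row["code"][:100])  # first characters of the code snippet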
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole` using `with_pole` as spare."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| code_codestyle: 322 |
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad the bottom and right of the image so both dimensions are multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| style_context_codestyle: 322 | label: 1 |
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| code_codestyle: 714 |
# Recursive (DFS-based) topological sort on a DAG given as an adjacency list.
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| style_context_codestyle: 384 | label: 0 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| code_codestyle: 347 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| style_context_codestyle: 347 | label: 1 |
# Project Euler 800: count hybrid-integers p^q * q^p (p, q distinct primes) not exceeding base^degree.
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the primes below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers <= base^degree by comparing logarithms with a two-pointer scan."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| code_codestyle: 715 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint (exception type reconstructed; the original is obscured here)
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| style_context_codestyle: 470 | label: 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : Optional[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| code_codestyle: 255 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def _lowerCAmelCase(a : list[float] ) -> Any:
return np.maximum(0 , a )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| style_context_codestyle: 255 | label: 1 |
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # Saving 4-bit models is not supported; exception type reconstructed (it is obscured in the source).
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| code_codestyle: 701 |
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| style_context_codestyle: 289 | label: 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_fairscale,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_torch_non_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed


bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])

    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
| code_codestyle: 546 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| style_context_codestyle: 137 | label: 0 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json configuration
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
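# Example invocation (all paths are hypothetical):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin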
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Project Euler 34: sum of all numbers equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_splits(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
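# Interactive sanity check of the APIs exercised above (requires network access;
# the dataset name is only an example):
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # -> ['train', 'validation']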
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, offset=True, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        # When `offset` is enabled, the pixel values are shifted before scaling so the
        # rescaled image is centred around zero.
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST):
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
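# Minimal usage sketch (the class name `VivitImageProcessor` is reconstructed from
# context and may differ in the original source; the video below is random data):
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   processor = VivitImageProcessor()
#   batch = processor(video, return_tensors="np")
#   print(np.array(batch["pixel_values"]).shape)  # e.g. (1, 8, 3, 224, 224) with the defaults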
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
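# Example invocation (script name and output directory are hypothetical):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations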
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with `vertices_number` vertices; each possible edge is
    added independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
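# Quick demonstration (edge sets vary with the RNG state):
#   random.seed(1)
#   print(random_graph(4, 0.5))
# complete_graph(3) is deterministic: {0: [1, 2], 1: [0, 2], 2: [0, 1]}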
def all_unique_chars(input_str: str) -> bool:
    """Return True if every character in input_str occurs at most once, using a bitmap of seen code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned the bit on for the current character's code point
        if (bitmap >> ch_unicode) & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
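# Examples (deterministic):
#   all_unique_chars("abc")  -> True
#   all_unique_chars("abca") -> False
# The bitmap is an unbounded Python int, so arbitrary Unicode code points are handled.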
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset: Dataset, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices)
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)]),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written
        return written
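# Usage sketch round-tripping a small dataset through an in-memory SQLite database
# (class names follow the reconstruction above):
#   import sqlite3
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   con = sqlite3.connect(":memory:")
#   SqlDatasetWriter(ds, "my_table", con).write()                   # Arrow -> SQL
#   ds2 = SqlDatasetReader("SELECT * FROM my_table", con).read()    # SQL -> Arrow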
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
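# Typical entry point for this builder (the data directory is hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_audio_clips")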
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by rotating the head element and recursing."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums using in-place swaps and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data from the backtracking version
    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
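# Both functions enumerate all n! orderings, e.g. (order of results may differ):
#   sorted(permute([1, 2, 3])) == sorted(permute_backtrack([1, 2, 3]))  # True
#   len(permute([1, 2, 3]))  # 6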
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """Exponential Moving Average of model parameters."""

    def __init__(self, parameters, decay=0.9999, min_decay=0.0, update_after_step=0, use_ema_warmup=False, inv_gamma=1.0, power=2 / 3, model_cls=None, model_config=None, **kwargs):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]
        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal buffers of the ExponentialMovingAverage to `device`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the state of the EMA as a dict, mirroring torch.optim.Optimizer conventions."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters) -> None:
        """Save the current parameters for restoring later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        """Load the EMA state, validating each entry."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
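# Minimal usage sketch of the EMA helper above (the class name `EMAModel` is
# reconstructed from context; the model is a toy stand-in):
#   model = torch.nn.Linear(4, 4)
#   ema = EMAModel(model.parameters(), decay=0.999)
#   for _ in range(10):
#       ...  # optimizer step updating `model`
#       ema.step(model.parameters())
#   ema.copy_to(model.parameters())  # load the averaged weights for evaluation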
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
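# To run only the fast test above (the path is an assumption about the repo layout):
#   pytest tests/pipelines/kandinsky_v22/ -k "test_kandinsky" -s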
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self) -> int:
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
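# One step of the recurrence seed' = (multiplier * seed + increment) % modulo with the
# constants above: starting from seed=0, next_number() returns
# (1664525 * 0 + 1013904223) % 2**32 == 1013904223.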
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem) composed of a single aggressive convolution."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project the residual features to the correct size."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: 1x1 reduce, 3x3, then 1x1 expand convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
snake_case = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
snake_case = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
    ''',
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''',
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
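# Minimal usage sketch (random pixel values; the config values are illustrative):
#   from transformers import ResNetConfig
#   config = ResNetConfig(num_labels=10)
#   model = ResNetForImageClassification(config)
#   logits = model(torch.randn(1, 3, 224, 224)).logits  # shape (1, 10)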
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below limit expressible as a prime square
    plus a prime cube plus a prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # sieve out multiples of p
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_complete_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowercase: Any = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
_lowercase: Optional[int] = {'''input_ids''': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=A_ , )
"""simple docstring"""
from __future__ import annotations
import numpy as np
def _lowerCAmelCase ( table ):
    """simple docstring"""
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            '''\'table\' has to be of square shaped array but got a '''
            f'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    # Doolittle decomposition: L has a unit diagonal, U is upper triangular.
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError('''No LU decomposition exists''' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
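# A minimal usage sketch of the decomposition above (hedged: the 3x3 matrix is
# illustrative, not part of the original file):
#
#     >>> matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#     >>> lower, upper = _lowerCAmelCase(matrix)
#     >>> bool(np.allclose(lower @ upper, matrix))
#     True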
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272
| 1
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
    if (
        (cp >= 0x4_e00 and cp <= 0x9_fff)  # CJK Unified Ideographs
        or (cp >= 0x3_400 and cp <= 0x4_dbf)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20_000 and cp <= 0x2a_6df)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2a_700 and cp <= 0x2b_73f)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2b_740 and cp <= 0x2b_81f)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2b_820 and cp <= 0x2c_eaf)  # CJK Unified Ideographs Extension E
        or (cp >= 0xf_900 and cp <= 0xf_aff)  # CJK Compatibility Ideographs
        or (cp >= 0x2f_800 and cp <= 0x2f_a1f)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
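# Quick sanity check (hedged; illustrative values): ord("中") == 0x4E2D falls in
# the 0x4E00-0x9FFF block, so the predicate holds, while ord("a") == 0x61
# matches no block and the check returns False.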
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
for char in word:
lowerCAmelCase = ord(SCREAMING_SNAKE_CASE )
if not _is_chinese_char(SCREAMING_SNAKE_CASE ):
return 0
return 1
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase = set()
for token in tokens:
lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) > 1 and is_chinese(SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE )
lowerCAmelCase = list(SCREAMING_SNAKE_CASE )
return word_list
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowerCAmelCase = max([len(SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
lowerCAmelCase = bert_tokens
lowerCAmelCase , lowerCAmelCase = 0, len(SCREAMING_SNAKE_CASE )
while start < end:
lowerCAmelCase = True
if is_chinese(bert_word[start] ):
lowerCAmelCase = min(end - start , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , 1 , -1 ):
lowerCAmelCase = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCAmelCase = """##""" + bert_word[j]
lowerCAmelCase = start + i
lowerCAmelCase = False
break
if single_word:
start += 1
return bert_word
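# Example of the whole-word marking above (hedged; the tokens are illustrative):
# bert_tokens ["我", "喜", "欢"] with chinese_word_set {"喜欢"} becomes
# ["我", "喜", "##欢"], so "喜欢" can later be masked as a single unit.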
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : LTP , SCREAMING_SNAKE_CASE : BertTokenizer ):
'''simple docstring'''
lowerCAmelCase = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 1_00 ):
lowerCAmelCase = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
lowerCAmelCase = [get_chinese_word(SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 1_00 ):
lowerCAmelCase = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = []
for id in input_ids:
lowerCAmelCase = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE )
input_tokens.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase = add_sub_symbol(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
            # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
lowerCAmelCase = token[2:]
                    # save Chinese tokens' positions
if len(SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE ) ):
ref_id.append(SCREAMING_SNAKE_CASE )
ref_ids.append(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
return ref_ids
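# Each entry of ref_ids lists, per input line, the token positions that
# whole-word masking should treat as word continuations; these are exactly the
# positions rewritten to "##..." by the marking function above.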
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCAmelCase = LTP(args.ltp ) # faster in GPU device
lowerCAmelCase = BertTokenizer.from_pretrained(args.bert )
lowerCAmelCase = prepare_ref(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowerCAmelCase = [json.dumps(SCREAMING_SNAKE_CASE ) + """\n""" for ref in ref_ids]
f.writelines(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 532
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 532
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
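# For example (hedged), with 2 processes this yields tensor([1., 2.]) on rank 0
# and tensor([3., 4.]) on rank 1, so the gather test below expects [1, 2, 3, 4].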
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] ):
'''simple docstring'''
a = create_tensor(UpperCAmelCase__ )
a = gather(UpperCAmelCase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] ):
'''simple docstring'''
a = [state.process_index]
a = gather_object(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == state.num_processes, F"""{gathered_obj}, {len(UpperCAmelCase__ )} != {state.num_processes}"""
assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
'''simple docstring'''
a = create_tensor(UpperCAmelCase__ )
a = broadcast(UpperCAmelCase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] ):
'''simple docstring'''
if state.is_main_process:
a = torch.arange(state.num_processes + 1 ).to(state.device )
else:
a = torch.arange(state.num_processes ).to(state.device )
a = pad_across_processes(UpperCAmelCase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] ):
'''simple docstring'''
if state.num_processes != 2:
return
a = create_tensor(UpperCAmelCase__ )
a = reduce(UpperCAmelCase__ , "sum" )
a = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), F"""{reduced_tensor} != {truth_tensor}"""
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ):
'''simple docstring'''
if state.num_processes != 2:
return
a = create_tensor(UpperCAmelCase__ )
a = reduce(UpperCAmelCase__ , "mean" )
a = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), F"""{reduced_tensor} != {truth_tensor}"""
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ):
'''simple docstring'''
main()
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = PartialState()
state.print(F"""State: {state}""" )
state.print("testing gather" )
test_gather(UpperCAmelCase__ )
state.print("testing gather_object" )
test_gather_object(UpperCAmelCase__ )
state.print("testing broadcast" )
test_broadcast(UpperCAmelCase__ )
state.print("testing pad_across_processes" )
test_pad_across_processes(UpperCAmelCase__ )
state.print("testing reduce_sum" )
test_reduce_sum(UpperCAmelCase__ )
state.print("testing reduce_mean" )
test_reduce_mean(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 32
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 32
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase : Any = """sew-d"""
def __init__( self : Optional[int] , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : str=768 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : Optional[Any]=12 , lowerCAmelCase_ : Optional[Any]=3_072 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Union[str, Any]=256 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=("p2c", "c2p") , lowerCAmelCase_ : Tuple="layer_norm" , lowerCAmelCase_ : Any="gelu_python" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : List[str]=1e-7 , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : Union[str, Any]="group" , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCAmelCase_ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=128 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=0.05 , lowerCAmelCase_ : Union[str, Any]=10 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : Any=10 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : List[str]="mean" , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Dict=256 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : int=2 , **lowerCAmelCase_ : Tuple , ) -> str:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = feat_extract_norm
SCREAMING_SNAKE_CASE_ = feat_extract_activation
SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = conv_bias
SCREAMING_SNAKE_CASE_ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = squeeze_factor
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = position_buckets
SCREAMING_SNAKE_CASE_ = share_att_key
SCREAMING_SNAKE_CASE_ = relative_attention
SCREAMING_SNAKE_CASE_ = norm_rel_ebd
SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = feat_proj_dropout
SCREAMING_SNAKE_CASE_ = final_dropout
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = feature_layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ = apply_spec_augment
SCREAMING_SNAKE_CASE_ = mask_time_prob
SCREAMING_SNAKE_CASE_ = mask_time_length
SCREAMING_SNAKE_CASE_ = mask_time_min_masks
SCREAMING_SNAKE_CASE_ = mask_feature_prob
SCREAMING_SNAKE_CASE_ = mask_feature_length
SCREAMING_SNAKE_CASE_ = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE_ = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ = ctc_zero_infinity
# sequence classification
SCREAMING_SNAKE_CASE_ = use_weighted_layer_sum
SCREAMING_SNAKE_CASE_ = classifier_proj_size
@property
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
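        # With the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
        # this product is 5 * 2**6 = 320, i.e. one logit frame per 320 input samples.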
| 393
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="ylacombe/bark-small"
lowerCamelCase__: Tuple =tempfile.mkdtemp()
lowerCamelCase__: Tuple ="en_speaker_1"
lowerCamelCase__: Optional[int] ="This is a test string"
lowerCamelCase__: List[str] ="speaker_embeddings_path.json"
lowerCamelCase__: int ="speaker_embeddings"
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Any) ->Tuple:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.get_tokenizer()
lowerCamelCase__: List[str] =BarkProcessor(tokenizer=UpperCAmelCase_)
processor.save_pretrained(self.tmpdirname)
lowerCamelCase__: Dict =BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Tuple =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase__: Dict =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
lowerCamelCase__: Any =BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Any =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase__: List[str] =35
lowerCamelCase__: Optional[Any] =2
lowerCamelCase__: Optional[Any] =8
lowerCamelCase__: Optional[int] ={
"semantic_prompt": np.ones(UpperCAmelCase_),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=UpperCAmelCase_)
lowerCamelCase__: int =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist())
# test loading voice preset from npz file
lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , "file.npz")
np.savez(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist())
# test loading voice preset from the hub
lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=self.voice_preset)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: str =self.get_tokenizer()
lowerCamelCase__: Dict =BarkProcessor(tokenizer=UpperCAmelCase_)
lowerCamelCase__: List[Any] =processor(text=self.input_string)
lowerCamelCase__: Optional[int] =tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 59
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Tuple = '''philschmid/bart-large-cnn-samsum'''
__UpperCAmelCase : Tuple = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCAmelCase : Optional[Any] = '''summarizer'''
__UpperCAmelCase : Any = AutoTokenizer
__UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM
__UpperCAmelCase : Optional[Any] = ['''text''']
__UpperCAmelCase : str = ['''text''']
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.pre_processor(UpperCamelCase__ , return_tensors="pt" , truncation=UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.model.generate(**UpperCamelCase__ )[0]
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.pre_processor.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
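    # Usage sketch (hedged: the agents framework normally drives this tool, and
    # the text below is illustrative): calling the tool runs encode -> forward ->
    # decode as implemented above, e.g. summary = tool("Some long English text ...").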
| 117
|
"""simple docstring"""
from __future__ import annotations
__snake_case = list[tuple[int, int]]
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> str:
'''simple docstring'''
snake_case : int = pos_x
snake_case : List[str] = pos_y
snake_case : List[Any] = (pos_y, pos_x)
snake_case : Optional[int] = goal_x
snake_case : Dict = goal_y
snake_case : Any = g_cost
snake_case : List[Any] = parent
snake_case : Union[str, Any] = self.calculate_heuristic()
def lowerCamelCase ( self ) -> float:
'''simple docstring'''
snake_case : Optional[Any] = abs(self.pos_x - self.goal_x )
snake_case : Dict = abs(self.pos_y - self.goal_y )
return dx + dy
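        # Manhattan distance: e.g. from (0, 0) to the default goal (6, 6) the
        # heuristic is |0 - 6| + |0 - 6| = 12.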
def __lt__( self , UpperCamelCase__ ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase__ )
snake_case : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , UpperCamelCase__ )
snake_case : Tuple = [self.start]
snake_case : list[Node] = []
snake_case : Dict = False
def lowerCamelCase ( self ) -> Path | None:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
snake_case : Tuple = True
return self.retrace_path(UpperCamelCase__ )
self.closed_nodes.append(UpperCamelCase__ )
snake_case : Optional[Any] = self.get_successors(UpperCamelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase__ )
else:
# retrieve the best current path
snake_case : Dict = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase__ )
else:
self.open_nodes.append(UpperCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase ( self , UpperCamelCase__ ) -> list[Node]:
'''simple docstring'''
snake_case : Dict = []
for action in delta:
snake_case : Union[str, Any] = parent.pos_x + action[1]
snake_case : str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase__ , UpperCamelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase__ , ) )
return successors
def lowerCamelCase ( self , UpperCamelCase__ ) -> Path:
'''simple docstring'''
snake_case : Optional[int] = node
snake_case : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case : Any = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
__snake_case = GreedyBestFirst(init, goal)
__snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case = 2
for elem in grid:
print(elem)
| 117
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __magic_name__ ( A__ ):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase = SMALL_MODEL_IDENTIFIER
UpperCAmelCase = "pt"
UpperCAmelCase = "tf"
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase__ )
model_tf.save_pretrained(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = "mock_framework"
# Framework provided - return whatever the user provides
UpperCAmelCase = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
UpperCAmelCase = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
UpperCAmelCase = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
UpperCAmelCase = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
UpperCAmelCase = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase__ ):
UpperCAmelCase = FeaturesManager.determine_framework(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase = MagicMock(return_value=UpperCamelCase__ )
with patch("transformers.onnx.features.is_tf_available" , UpperCamelCase__ ):
UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
UpperCAmelCase = MagicMock(return_value=UpperCamelCase__ )
with patch("transformers.onnx.features.is_torch_available" , UpperCamelCase__ ):
UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Both in environment -> use PyTorch
UpperCAmelCase = MagicMock(return_value=UpperCamelCase__ )
UpperCAmelCase = MagicMock(return_value=UpperCamelCase__ )
with patch("transformers.onnx.features.is_tf_available" , UpperCamelCase__ ), patch(
"transformers.onnx.features.is_torch_available" , UpperCamelCase__ ):
UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# Both not in environment -> raise error
UpperCAmelCase = MagicMock(return_value=UpperCamelCase__ )
UpperCAmelCase = MagicMock(return_value=UpperCamelCase__ )
with patch("transformers.onnx.features.is_tf_available" , UpperCamelCase__ ), patch(
"transformers.onnx.features.is_torch_available" , UpperCamelCase__ ):
with self.assertRaises(UpperCamelCase__ ):
UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 323
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class __magic_name__ ( A__ ):
lowercase : Union[str, Any] ='''mra'''
def __init__( self : Tuple , UpperCamelCase__ : Any=5_02_65 , UpperCamelCase__ : int=7_68 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Tuple=30_72 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : str=5_12 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : List[Any]=1e-5 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : str=4 , UpperCamelCase__ : Tuple="full" , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=2 , **UpperCamelCase__ : int , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = block_per_row
UpperCAmelCase = approx_mode
UpperCAmelCase = initial_prior_first_n_blocks
UpperCAmelCase = initial_prior_diagonal_n_blocks
| 323
| 1
|
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash :
    def __init__( self , data ):
        self.data = data
        self.h = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]
    @staticmethod
    def rotate( n , b ):
        # 32-bit left rotation, e.g. rotate(0x80000000, 1) == 1.
        return ((n << b) | (n >> (32 - b))) & 0XFF_FF_FF_FF
    def padding( self ):
        padding = B"\x80" + B"\x00" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ):
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]
    def expand_block( self , block ):
        # SHA-1 message schedule: expand the 16 input words to 80, each new word
        # being a 1-bit left rotation of an XOR of four earlier words.
        w = list(struct.unpack(">16L" , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
    def final_hash( self ):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0X6E_D9_EB_A1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F_1B_BC_DC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0XCA_62_C1_D6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFF_FF_FF_FF,
                self.h[1] + b & 0XFF_FF_FF_FF,
                self.h[2] + c & 0XFF_FF_FF_FF,
                self.h[3] + d & 0XFF_FF_FF_FF,
                self.h[4] + e & 0XFF_FF_FF_FF,
            )
        return ("{:08x}" * 5).format(*self.h )
def __a():
'''simple docstring'''
    msg = B"Test String"
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324
def __a():
'''simple docstring'''
    parser = argparse.ArgumentParser(description="Process some strings or files" )
    parser.add_argument(
        "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , "utf-8" )
    print(SHAaHash(hash_input ).final_hash() )
if __name__ == "__main__":
    __a()
import doctest
doctest.testmod()
| 703
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a(SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class lowerCAmelCase_ :
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
pass
def _snake_case ( self ) -> Dict:
pass
def _snake_case ( self ) -> int:
pass
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
_lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
_lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model}
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
_lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
_lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
_lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
_lowerCAmelCase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
_lowerCAmelCase = after_output[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1E-5 )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
_lowerCAmelCase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.get_pretrained_model_and_inputs()
_lowerCAmelCase = model_a(**_lowerCAmelCase )
_lowerCAmelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
_lowerCAmelCase = model_a(**_lowerCAmelCase )
_lowerCAmelCase = after_outputs[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1E-5 )
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = TFViTModel(_lowerCAmelCase , name="vision_model" )
_lowerCAmelCase = TFBertModel(_lowerCAmelCase , name="text_model" )
return vision_model, text_model
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = TFViTModelTester(self )
_lowerCAmelCase = TFBertModelTester(self )
_lowerCAmelCase = vit_model_tester.prepare_config_and_inputs()
_lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
def _snake_case ( self ) -> List[Any]:
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ) -> int:
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
_lowerCAmelCase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = TFDeiTModel(_lowerCAmelCase , name="vision_model" )
_lowerCAmelCase = TFRobertaModel(_lowerCAmelCase , name="text_model" )
return vision_model, text_model
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = TFDeiTModelTester(self )
_lowerCAmelCase = TFRobertaModelTester(self )
_lowerCAmelCase = vit_model_tester.prepare_config_and_inputs()
_lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
def _snake_case ( self ) -> Any:
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
_lowerCAmelCase = TFCLIPVisionModel(_lowerCAmelCase , name="vision_model" )
_lowerCAmelCase = TFBertModel(_lowerCAmelCase , name="text_model" )
return vision_model, text_model
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = TFCLIPVisionModelTester(self )
_lowerCAmelCase = TFBertModelTester(self )
_lowerCAmelCase = clip_model_tester.prepare_config_and_inputs()
_lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
_lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
_lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCAmelCase = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="np" )
_lowerCAmelCase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCAmelCase = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1E-3 ) )
| 489
| 0
|
"""simple docstring"""
from collections.abc import Callable
class __lowercase :
def __init__( self : Tuple ,A : Callable | None = None ):
'''simple docstring'''
# Stores actual heap items.
UpperCAmelCase__ : list = []
# Stores indexes of each item for supporting updates and deletion.
UpperCAmelCase__ : dict = {}
# Stores current size of heap.
UpperCAmelCase__ : Any = 0
        # Stores the function used to score an item; heap ordering is based on
        # that score.
        UpperCAmelCase__ : int = key or (lambda x : x)
def __lowercase ( self : Union[str, Any] ,A : int ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowercase ( self : Tuple ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowercase ( self : Any ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowercase ( self : List[Any] ,A : int ,A : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.arr[j], self.arr[i]
def __lowercase ( self : Optional[int] ,A : int ,A : int ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowercase ( self : Optional[int] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self._left(A )
UpperCAmelCase__ : Dict = self._right(A )
UpperCAmelCase__ : Optional[int] = i
if left is not None and not self._cmp(A ,A ):
UpperCAmelCase__ : List[Any] = left
if right is not None and not self._cmp(A ,A ):
UpperCAmelCase__ : List[Any] = right
return valid_parent
def __lowercase ( self : int ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self._parent(A )
while parent is not None and not self._cmp(A ,A ):
self._swap(A ,A )
UpperCAmelCase__ , UpperCAmelCase__ : int = parent, self._parent(A )
def __lowercase ( self : str ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = self._get_valid_parent(A )
while valid_parent != index:
self._swap(A ,A )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(A )
def __lowercase ( self : Optional[Any] ,A : int ,A : int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCAmelCase__ : Tuple = self.pos_map[item]
UpperCAmelCase__ : Dict = [item, self.key(A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(A )
self._heapify_down(A )
def __lowercase ( self : List[Any] ,A : int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCAmelCase__ : Any = self.pos_map[item]
del self.pos_map[item]
UpperCAmelCase__ : Dict = self.arr[self.size - 1]
UpperCAmelCase__ : List[Any] = index
self.size -= 1
        # Make sure the heap is valid in both the up and down direction. Ideally only
        # one of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(A )
self._heapify_down(A )
def __lowercase ( self : str ,A : int ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(A )] )
else:
UpperCAmelCase__ : List[str] = [item, self.key(A )]
UpperCAmelCase__ : Union[str, Any] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowercase ( self : str ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
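    # Design note (hedged): ordering is done purely on the stored key(item) score,
    # so passing e.g. key=lambda item: -item when constructing the heap turns this
    # min-heap into a max-heap without touching the comparison logic above.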
def lowerCAmelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65
|
'''simple docstring'''
def __snake_case ( lowercase : int = 1_000_000 ):
    primes = set(range(3 , lowercase , 2 ) )
    primes.add(2 )
    for p in range(3 , lowercase , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , lowercase , p ) ) )
    phi = [float(n ) for n in range(lowercase + 1 )]
    for p in primes:
        for n in range(p , lowercase + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
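# Small check of the totient sieve (hedged, computed by hand): for limit 8,
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so the sum is 21, the number of reduced
# proper fractions with denominator at most 8.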
if __name__ == "__main__":
    print(f"""{__snake_case() = }""")
| 508
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def A__ ( lowerCAmelCase ):
raise NotImplementedError()
@abstractmethod
def A__ ( self ):
raise NotImplementedError()
| 23
|
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def prim ( graph : list , root : Vertex ) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap ( graph : list , root : Vertex ) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector ( ) -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
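    # Example (illustrative, not from the original file): a 4-vertex graph.
    # connect() takes 1-based labels; prim()/prim_heap() yield (vertex, parent)
    # pairs, also 1-based:
    #   G = [Vertex(n) for n in range(4)]
    #   connect(G, 1, 2, 3); connect(G, 1, 3, 4)
    #   connect(G, 2, 3, 1); connect(G, 3, 4, 2)
    #   prim(G, G[0])             # -> [(2, 1), (3, 2), (4, 3)]
    #   list(prim_heap(G, G[0]))  # -> [(2, 1), (3, 2), (4, 3)]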
| 23
| 1
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , CASES )
def test_import_parsing ( tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , """test_file.py""" )
    with open(tmp_file_path , """w""" ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
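# For intuition, a simplified stand-in for get_imports (NOT the transformers
# implementation, which additionally treats imports inside try/except as
# optional dependencies -- the behaviour the cases above exercise):
import ast

def _top_level_modules(source: str) -> list:
    modules = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            modules.update(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module and node.level == 0:
            modules.add(node.module.split(".")[0])
    return sorted(modules)

assert _top_level_modules(TOP_LEVEL_IMPORT) == ["os"]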
| 654
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {"""num_train_timesteps""": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        '''simple docstring'''
        pass
    def check_over_forward( self , time_step=0 , **kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 654
| 1
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path ( pred_path , tgt_path , save_path=None , **kwargs ):
    '''Kwargs will be passed to calculate_rouge.'''
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
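    # Hypothetical invocation (file names are placeholders):
    #   python rouge_cli.py predicted_summaries.txt reference_summaries.txt --save_path rouge.json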
| 701
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = LongformerTokenizer
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Union[str, Any] = LongformerTokenizerFast
UpperCAmelCase__ : List[str] = True
def UpperCamelCase( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_snake_case = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
_snake_case = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_snake_case = {"unk_token": "<unk>"}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase ) )
def UpperCamelCase( self , **lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
def UpperCamelCase( self , **lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase ):
_snake_case = "lower newer"
_snake_case = "lower newer"
return input_text, output_text
def UpperCamelCase( self ):
_snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case = "lower newer"
_snake_case = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
_snake_case = tokenizer.tokenize(lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase , lowerCamelCase )
_snake_case = tokens + [tokenizer.unk_token]
_snake_case = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCamelCase ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCamelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def UpperCamelCase( self ):
_snake_case = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
_snake_case = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
_snake_case = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
_snake_case = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
_snake_case = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase( self ):
_snake_case = self.get_tokenizer()
_snake_case = "Encode this sequence."
_snake_case = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
_snake_case = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
_snake_case = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
_snake_case = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
# Testing spaces after special tokens
_snake_case = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase )} ) # mask token has a left space
_snake_case = tokenizer.convert_tokens_to_ids(lowerCamelCase )
_snake_case = "Encode <mask> sequence"
_snake_case = "Encode <mask>sequence"
_snake_case = tokenizer.encode(lowerCamelCase )
_snake_case = encoded.index(lowerCamelCase )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
_snake_case = tokenizer.encode(lowerCamelCase )
_snake_case = encoded.index(lowerCamelCase )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
_snake_case = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
_snake_case = "A, <mask> AllenNLP sentence."
_snake_case = tokenizer_r.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
_snake_case = tokenizer_p.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
        # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def UpperCamelCase( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_snake_case = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , lowerCamelCase )
def UpperCamelCase( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case = F'''{text_of_1_token} {text_of_1_token}'''
_snake_case = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
_snake_case = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
_snake_case = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
_snake_case = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
_snake_case = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ) + 1, 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
_snake_case = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
_snake_case = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
_snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
| 368
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class OnnxExportTestCase ( unittest.TestCase ):
    """simple docstring"""
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("""bert-base-cased""", {}),
        ("""gpt2""", {"""use_cache""": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
    def test_export_tensorflow( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'tf' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_pytorch( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'pt' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model( self ):
        """simple docstring"""
        from transformers import BertModel
        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , 'pt' , 12 , tokenizer )
@require_tf
@slow
    def test_quantize_tf( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'tf' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
    def test_quantize_pytorch( self ):
        """simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'pt' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        """simple docstring"""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch( self ):
        """simple docstring"""
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'pt' )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf( self ):
        """simple docstring"""
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'tf' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        """simple docstring"""
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
        self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
    def test_ensure_valid_input( self ):
        """simple docstring"""
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0] , 'input_ids' )
    def test_generate_identified_filename( self ):
        """simple docstring"""
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
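    # The conversion entry point exercised above takes (framework, model,
    # output path, opset, tokenizer, ...); a hypothetical standalone call:
    #   convert("pt", "bert-base-cased", Path("onnx/bert-base-cased.onnx"), 12)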
| 260
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
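    # Hypothetical invocation (script name and dump path are placeholders):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation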
| 260
| 1
|
"""simple docstring"""
import math
def malus_law ( initial_intensity : float , angle : float ) -> float:
    '''simple docstring'''
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
    # handling of values out of allowed range
    if angle < 0 or angle > 3_6_0:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
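    # Quick check (values illustrative): a polarizer at 60 degrees passes
    # cos^2(60 deg) = 25% of the incident light.
    print(malus_law(100.0 , 60.0 ) )  # ~25.0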
| 712
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 197
| 0
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 13
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_snake_case : Optional[Any] = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig :
    '''simple docstring'''
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" )
        self.post_init()
    def post_init( self ):
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError("llm_int8_threshold must be a float" )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError("llm_int8_skip_modules must be a list of strings" )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean" )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError("bnb_4bit_quant_type must be a string" )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean" )
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse(
            "0.39.0" ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" )
    def is_quantizable( self ):
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file( self , json_file_path ):
        with open(json_file_path , "w" , encoding="utf-8" ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
            writer.write(json_string )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"] ).split("." )[1]
        return output
    def __repr__( self ):
        return F'''{self.__class__.__name__} {self.to_json_string()}'''
    def to_json_string( self , use_diff = True ):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ):
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
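    # Usage sketch (requires torch and bitsandbytes>=0.39.0; values illustrative):
    #   nf4_config = BitsAndBytesConfig(
    #       load_in_4bit=True,
    #       bnb_4bit_quant_type="nf4",
    #       bnb_4bit_compute_dtype=torch.float16,
    #   )
    #   nf4_config.quantization_method()  # -> "nf4"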
| 441
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : List[Any] = DDIMPipeline
__magic_name__ : str = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__magic_name__ : Any = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
__magic_name__ : Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
__magic_name__ : Optional[int] = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=3E-3 )
    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=3E-3 )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
    def test_inference_cifar10( self ):
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_ema_bedroom( self ):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
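# The cifar10 slow test above, condensed into a standalone sketch (GPU and
# network access assumed; UNetaDModel is this file's mangled alias of
# diffusers.UNet2DModel):
#   unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler()).to(torch_device)
#   images = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images
#   images.shape  # (1, 32, 32, 3)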
| 712
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 595
| 0
|
'''simple docstring'''
def solution (length : int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
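    # Sanity check from the Project Euler 116 statement (assumed to be the
    # problem solved here): a row of length 5 admits 7 red (length-2),
    # 3 green (length-3) and 2 blue (length-4) tilings.
    assert solution(5 ) == 7 + 3 + 2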
| 476
|
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats ( url : str = "https://www.worldometers.info/coronavirus" ) -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    keys = soup.findAll("""h1""" )
    values = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
    keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
    values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
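    # The mapping pairs headline labels with counter strings, e.g. (figures
    # illustrative, not real data):
    #   {"Coronavirus Cases:": "704,753,890", "Deaths:": "7,010,681", ...}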
| 641
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
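# For intuition, a toy version of the lazy-import pattern above (NOT the
# transformers _LazyModule; simplified, no TYPE_CHECKING handling):
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        return getattr(submodule, attr)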
| 286
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy ( x ):
    """Entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class DeeBertEncoder ( nn.Module ):
    """simple docstring"""
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        '''simple docstring'''
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
def _UpperCAmelCase ( self: Union[str, Any] , __lowerCAmelCase: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _UpperCAmelCase ( self: Dict , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Union[str, Any]=None , __lowerCAmelCase: Any=None , __lowerCAmelCase: Optional[int]=None , __lowerCAmelCase: Optional[Any]=None , ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = ()
__UpperCAmelCase = ()
__UpperCAmelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__UpperCAmelCase = all_hidden_states + (hidden_states,)
__UpperCAmelCase = layer_module(
__lowerCAmelCase , __lowerCAmelCase , head_mask[i] , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = layer_outputs[0]
if self.output_attentions:
__UpperCAmelCase = all_attentions + (layer_outputs[1],)
__UpperCAmelCase = (hidden_states,)
if self.output_hidden_states:
__UpperCAmelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCAmelCase = current_outputs + (all_attentions,)
__UpperCAmelCase = self.highway[i](__lowerCAmelCase )
# logits, pooled_output
if not self.training:
__UpperCAmelCase = highway_exit[0]
__UpperCAmelCase = entropy(__lowerCAmelCase )
__UpperCAmelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__UpperCAmelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__UpperCAmelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowerCAmelCase , i + 1 )
else:
__UpperCAmelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__UpperCAmelCase = all_hidden_states + (hidden_states,)
__UpperCAmelCase = (hidden_states,)
if self.output_hidden_states:
__UpperCAmelCase = outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCAmelCase = outputs + (all_attentions,)
__UpperCAmelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from the output of one non-final BertLayer
    to cross-entropy computation in BertForSequenceClassification.
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
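

if __name__ == "__main__":
    # Minimal sanity check for `entropy` (an illustrative sketch, not part of the
    # original DeeBERT sources): uniform logits over k classes give log(k), so a
    # row of equal logits over 4 classes should print ~1.3863.
    print(entropy(torch.zeros(2, 4)))  # tensor([1.3863, 1.3863])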
| 286
| 1
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
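

# Illustrative usage outside the test runner (an added sketch; the argument order
# mirrors the tests above: source code, a dict of callable tools, then the mutable
# `state` dict that receives assignments):
#
#     state = {"x": 3}
#     result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
#     # result == 5 and state == {"x": 3, "y": 5}

if __name__ == "__main__":
    unittest.main()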
| 281
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 281
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
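

# Illustrative note (an added sketch, assuming `transformers` is installed): with
# the default conv strides (5, 2, 2, 2, 2, 2, 2) above, the feature extractor
# downsamples raw audio by 5 * 2**6 == 320 samples per output frame:
#
#     from transformers import Wav2Vec2Config
#     Wav2Vec2Config().inputs_to_logits_ratio  # 320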
| 700
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
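

# Illustrative note (an added sketch, assuming `transformers` is installed): the
# `attribute_map` above aliases the generic config names to the Pegasus-specific
# ones, so both spellings work:
#
#     from transformers import PegasusConfig
#     config = PegasusConfig(d_model=512)
#     config.hidden_size           # 512 (resolved to d_model)
#     config.num_attention_heads   # 16 (resolved to encoder_attention_heads)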
| 116
| 0
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
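

# Minimal end-to-end sketch mirroring the integration test above (an illustration;
# assumes `transformers`, `torch` and `PIL` are installed; model id as used above):
#
#     image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#     model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
#     image = prepare_img()
#     inputs = image_processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     results = image_processor.post_process_object_detection(
#         outputs, threshold=0.9, target_sizes=[image.size[::-1]]
#     )[0]  # dict with "scores", "labels", "boxes"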
| 99
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 75
| 0
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
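#
# (Illustration added here, not part of the tool's interface: the expansion is a
# plain cartesian product, e.g.
#     list(map(" ".join, itertools.product(["--tf32 0", "--tf32 1"],
#                                          ["--fp16 0", "--fp16 1", "--bf16 1"])))
# yields exactly the 6 combinations listed above.)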
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
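

# Illustrative usage (an added sketch; assumes these filesystems are registered
# with fsspec under their `protocol` names, as the `datasets` library does):
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rt") as f:
#         text = f.read()  # transparently decompressed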
| 49
| 0
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A__: tuple[int, int] , A__: int ) -> Optional[Any]:
__lowerCamelCase : List[Any] = position
__lowerCamelCase : Optional[Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__lowerCamelCase : List[Any] = []
for position in positions:
__lowerCamelCase : Optional[Any] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(A__ )
return permissible_positions
def UpperCAmelCase ( A__: list[list[int]] ) -> str:
return not any(elem == 0 for row in board for elem in row )
def UpperCAmelCase ( A__: list[list[int]] , A__: tuple[int, int] , A__: int ) -> str:
if is_complete(A__ ):
return True
for position in get_valid_pos(A__ , len(A__ ) ):
__lowerCamelCase : List[Any] = position
if board[y][x] == 0:
__lowerCamelCase : Optional[int] = curr + 1
if open_knight_tour_helper(A__ , A__ , curr + 1 ):
return True
__lowerCamelCase : Any = 0
return False
def UpperCAmelCase ( A__: int ) -> str:
__lowerCamelCase : List[str] = [[0 for i in range(A__ )] for j in range(A__ )]
for i in range(A__ ):
for j in range(A__ ):
__lowerCamelCase : Any = 1
if open_knight_tour_helper(A__ , (i, j) , 1 ):
return board
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : str = f'''Open Kight Tour cannot be performed on a board of size {n}'''
raise ValueError(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
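
    # Illustrative run (an added sketch, not in the original module): a 5x5 open
    # tour exists; each cell holds the 1-based step at which the knight visits it.
    for row in open_knight_tour(5):
        print(row)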
| 594
|
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 173
| 0
|
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 25
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 25
| 1
|
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 204
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you better save it.]")


if __name__ == "__main__":
    main()
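
# Quick illustrative checks for the validator above (not part of the
# original module): the first password mixes case, digits and symbols at
# the minimum length; the second is too short and too uniform.
if __name__ == "__main__":
    assert is_strong_password("Hwea7$2!") is True
    assert is_strong_password("hello") is False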
| 204
| 1
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b using the binary (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, keeping intermediate values small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
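
# Quick sanity checks (illustrative, not from the original module): the
# helpers above should agree with the builtin operators they re-implement.
if __name__ == "__main__":
    assert binary_multiply(2, 9) == 2 * 9
    assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5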
| 717
|
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = '\nimport os\n'

IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'

DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'

TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'

TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'

MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'

EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'

GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'

MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'

MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 166
| 0
|
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher

    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 99
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)


def floyd(n):
    '''simple docstring'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 111
| 0
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_SCREAMING_SNAKE_CASE = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_SCREAMING_SNAKE_CASE = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_SCREAMING_SNAKE_CASE = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 712
|
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __magic_name__ :
_SCREAMING_SNAKE_CASE : float
_SCREAMING_SNAKE_CASE : TreeNode | None = None
_SCREAMING_SNAKE_CASE : TreeNode | None = None
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
def is_valid_tree(SCREAMING_SNAKE_CASE ) -> bool:
if node is None:
return True
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Each node should be type of TreeNode and data should be float." )
def is_binary_search_tree_recursive_check(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , SCREAMING_SNAKE_CASE , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , SCREAMING_SNAKE_CASE )
)
return is_binary_search_tree_recursive_check(SCREAMING_SNAKE_CASE , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
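
# Minimal usage sketch for is_binary_search_tree (illustrative, not from
# the original module): a valid BST and one that violates the ordering.
if __name__ == "__main__":
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    print(is_binary_search_tree(valid))    # True
    print(is_binary_search_tree(invalid))  # False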
| 614
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 'deta'
_a = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : str=900 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : List[Any]=6 , lowerCAmelCase : Optional[int]=2048 , lowerCAmelCase : Any=8 , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : Union[str, Any]=1024 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Union[str, Any]="relu" , lowerCAmelCase : List[str]=256 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : Optional[Any]=1.0 , lowerCAmelCase : Tuple=True , lowerCAmelCase : str=False , lowerCAmelCase : Dict="sine" , lowerCAmelCase : List[str]=5 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : str=True , lowerCAmelCase : List[str]=300 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : Optional[int]=5 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[int]=1 , lowerCAmelCase : Optional[int]=1 , lowerCAmelCase : str=5 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=0.25 , **lowerCAmelCase : List[str] , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase = backbone_config.pop("""model_type""" )
lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase = config_class.from_dict(lowerCAmelCase )
lowerCAmelCase = backbone_config
lowerCAmelCase = num_queries
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = d_model
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = init_xavier_std
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = auxiliary_loss
lowerCAmelCase = position_embedding_type
# deformable attributes
lowerCAmelCase = num_feature_levels
lowerCAmelCase = encoder_n_points
lowerCAmelCase = decoder_n_points
lowerCAmelCase = two_stage
lowerCAmelCase = two_stage_num_proposals
lowerCAmelCase = with_box_refine
lowerCAmelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
lowerCAmelCase = class_cost
lowerCAmelCase = bbox_cost
lowerCAmelCase = giou_cost
# Loss coefficients
lowerCAmelCase = mask_loss_coefficient
lowerCAmelCase = dice_loss_coefficient
lowerCAmelCase = bbox_loss_coefficient
lowerCAmelCase = giou_loss_coefficient
lowerCAmelCase = eos_coefficient
lowerCAmelCase = focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase )
@property
def __lowercase ( self : str ):
return self.encoder_attention_heads
@property
def __lowercase ( self : List[Any] ):
return self.d_model
def __lowercase ( self : Tuple ):
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.backbone_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output
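
# A minimal usage sketch for the config above (illustrative; assumes a
# transformers build that ships DETA, and the model weights are randomly
# initialized here):
#
#     from transformers import DetaConfig, DetaModel
#     configuration = DetaConfig()
#     model = DetaModel(configuration)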
| 169
|
"""simple docstring"""
from __future__ import annotations
import math
def lowercase (snake_case__ : int ) -> list[int]:
'''simple docstring'''
if num <= 0:
lowerCAmelCase = f'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(snake_case__ )
lowerCAmelCase = [True] * (num + 1)
lowerCAmelCase = []
lowerCAmelCase = 2
lowerCAmelCase = int(math.sqrt(snake_case__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(snake_case__ )
# Set multiples of start be False
for i in range(start * start , num + 1 , snake_case__ ):
if sieve[i] is True:
lowerCAmelCase = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(snake_case__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
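
# Quick sanity check of the sieve above (illustrative, not part of the
# original module): the primes up to 30.
if __name__ == "__main__":
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]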
| 169
| 1
|
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 486
|
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


# function to search the path
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 486
| 1
|
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 86
|
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 86
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : int = 'codegen'
A_ : Union[str, Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> str:
_a = vocab_size
_a = n_ctx
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = rotary_dim
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
_a = bos_token_id
_a = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> List[Any]:
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ):
# TODO: how to do that better?
_a = 0
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
_a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_head
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
_a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
_a = common_inputs['''attention_mask''']
if self.use_past:
_a = ordered_inputs['''attention_mask'''].dtype
_a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return 13
| 713
|
"""simple docstring"""
from pathlib import Path
import fire
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : str, _lowerCAmelCase : int ):
"""simple docstring"""
_a = Path(_lowerCAmelCase )
_a = Path(_lowerCAmelCase )
dest_dir.mkdir(exist_ok=_lowerCAmelCase )
for path in src_dir.iterdir():
_a = [x.rstrip() for x in list(path.open().readlines() )][:n]
_a = dest_dir.joinpath(path.name )
print(_lowerCAmelCase )
dest_path.open('''w''' ).write('''\n'''.join(_lowerCAmelCase ) )
if __name__ == "__main__":
fire.Fire(minify)
| 285
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 59
|
'''simple docstring'''
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a value (or its derivative if deriv is True)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 442
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines: `frames` holds the denoised
    video frames, either as a list of NumPy arrays or as a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 159
|
'''simple docstring'''
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 159
| 1
|
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # a DFS root is an articulation point only if it has more than one outgoing DFS edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
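# For this sample graph, compute_ap should report vertices 2, 3 and 5 as articulation points.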
compute_ap(data)
| 175
|
'''simple docstring'''
def net_present_value(discount_rate: float, cash_flows: list) -> float:
    """
    Compute the net present value of a series of cash flows.

    >>> net_present_value(0.1, [-100, 50, 60])
    -4.96
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
| 0
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    '''Build an n-qubit quantum Fourier transform circuit and simulate it.'''
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be a integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')
    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
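    # The QFT maps |000> to a uniform superposition, so each of the 8 outcomes
    # should appear in roughly 1/8 of the 10000 shots.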
| 709
|
def binary_xor(a: int, b: int) -> str:
    '''
    Return the bitwise XOR of two non-negative integers as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    '''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 0
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Return the maximum sum of non-adjacent elements of nums.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
from math import sqrt
def is_prime(number: int) -> bool:
    """Check primality using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
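    # The default argument (nth=10_001) yields 104743, the 10001st prime.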
| 32
| 1
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
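    # Example invocation (the script name and output path here are illustrative):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation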
| 720
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    '''Find the maximum-sum contiguous subarray of arr[low..high] by divide and conquer.'''
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    '''Find the maximum-sum subarray that crosses the midpoint.'''
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    '''Time max_subarray on a random array of the given size.'''
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    '''Plot max_subarray runtime against input size.'''
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 94
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False

    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        '''simple docstring'''
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length',
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length',
                )
    def test_padding_if_pad_token_set_slow(self):
        '''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')
        # Simple input
        s = 'This is a simple input'
        s2 = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        p2 = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding='max_length', max_length=30, return_tensors='np')
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors='np')
        out_p = tokenizer(*p, padding='max_length', max_length=60, return_tensors='np')
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors='np')
        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s['input_ids'])
        self.assertTrue(0 in out_s['attention_mask'])
        # s2
        # test automatic padding
        self.assertEqual(out_s2['input_ids'].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2['input_ids'][0])
        self.assertFalse(0 in out_s2['attention_mask'][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2['input_ids'][1])
        self.assertTrue(0 in out_s2['attention_mask'][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p['input_ids'])
        self.assertTrue(0 in out_p['attention_mask'])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2['input_ids'].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2['input_ids'][0])
        self.assertFalse(0 in out_p2['attention_mask'][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2['input_ids'][1])
        self.assertTrue(0 in out_p2['attention_mask'][1])

    def test_add_bos_token_slow(self):
        '''simple docstring'''
        bos_token = '$$$'
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = 'This is a simple input'
        s2 = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        '''simple docstring'''
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        '''simple docstring'''
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                sequence_0 = 'Encode this.'
                sequence_1 = 'This one too please.'
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict['input_ids']
                special_tokens_mask = encoded_sequence_dict['special_tokens_mask']
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        '''simple docstring'''
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=True)
        text = 'A photo of a cat'
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained('test_opt')
        tokenizer = AutoTokenizer.from_pretrained('./test_opt')
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_serialize_deserialize_slow_opt(self):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m', use_slow=True)
        text = 'A photo of a cat'
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip('This test is failing because of a bug in the fast tokenizer')
    def test_users_can_modify_bos(self):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=True)
        tokenizer.bos_token = 'bos'
        tokenizer.bos_token_id = tokenizer.get_vocab()['bos']
        text = 'A photo of a cat'
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained('./tok')
        tokenizer = AutoTokenizer.from_pretrained('./tok')
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
| 92
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test')
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip('Model is currently gated')
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
        input_ids = tokenizer.encode(prompt, return_tensors='pt')
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 231
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""" ) ) is not True, reason="""Skipping test because should only be run when releasing minor transformers version""", )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self) -> None:
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')
    def create_estimator(self, instance_count=1):
        """simple docstring"""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f'{self.env.base_job_name}-single',
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version='py36',
        )
    def save_results_as_csv(self, job_name) -> None:
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')
    def test_glue(self) -> None:
        """simple docstring"""
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 354
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger()
def _dump_articles(path, articles) -> None:
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model) -> None:
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys, 'argv', testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self) -> None:
        """simple docstring"""
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model) -> None:
        """simple docstring"""
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model) -> None:
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'''
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if "translation" in task:
            expected_strings.append('bleu')
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
| 354
| 1
|
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """simple docstring"""
    return math.pow(x, 3) - (2 * x) - 5
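# f has a single real root near x = 2.0945514, which the secant method should
# find from the starting points 3 and 3.5 used below.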
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 324
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Approximate the distance in meters between two points on the surface of Earth,
    modelled as an ellipsoid, using Lambert's formula.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87
| 0
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    '''simple docstring'''
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            """`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
            F'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith("""py"""):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith("""test_modeling_"""):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace(""".py""", """""")]
    test_module_path = """.""".join(components)
    return test_module_path


def get_test_module(test_file):
    '''simple docstring'''
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    '''simple docstring'''
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("""ModelTester"""):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    '''simple docstring'''
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, """all_model_classes""", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    '''simple docstring'''
    test = test_class()
    if hasattr(test, """setUp"""):
        test.setUp()
    model_tester = None
    if hasattr(test, """model_tester"""):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    '''simple docstring'''
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    '''simple docstring'''
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 700
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_lowerCamelCase : str = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_lowerCamelCase : List[str] = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_lowerCamelCase : Tuple = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_lowerCamelCase : str = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_lowerCamelCase : Dict = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_lowerCamelCase : int = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_lowerCamelCase : Dict = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_lowerCamelCase : int = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        # `results` maps each model id, with "/" and "-" replaced by "_", to its expected output tensor.
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 512
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
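# Usage sketch (hedged): the two-stage Kandinsky flow pairs the prior with the text-to-image
# pipeline. The checkpoint names below are assumptions, not pinned by this module:
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   image_embeds, negative_image_embeds = prior("a red cat").to_tuple()
#   image = pipe("a red cat", image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]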
| 23
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
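# Example invocation (script filename and paths are hypothetical):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert-pytorch/pytorch_model.bin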
| 23
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["""ConditionalDetrFeatureExtractor"""]
__SCREAMING_SNAKE_CASE =["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
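# With the lazy structure above, consumers import names as usual while the heavy
# torch/vision imports are deferred until first attribute access, e.g. (sketch):
#   from transformers.models.conditional_detr import ConditionalDetrConfig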
| 89
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
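# Usage sketch with constrained beam search (model name is a placeholder, not from this file):
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   word_ids = tok(["rain", "raining"], add_special_tokens=False).input_ids
#   constraint = DisjunctiveConstraint(word_ids)
#   out = model.generate(
#       **tok("translate English to German: it is raining", return_tensors="pt"),
#       constraints=[constraint], num_beams=4,
#   )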
| 89
| 1
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
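# Usage sketch (assumes access to the checkpoint referenced in the vocab map above):
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   text = tokenizer.decode(ids, skip_special_tokens=True)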
| 49
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
        num_labels=3, scope=None, n_targets=8, num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 370
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
        num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
        max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
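# Sketch: a deliberately tiny config for smoke tests (values are illustrative,
# not the pretrained defaults above):
#   config = RealmConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64,
#                        intermediate_size=128, num_candidates=2)
#   assert config.model_type == "realm"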
| 370
| 1
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase :Dict = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')


AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
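# Usage sketch: this builder backs datasets' "audiofolder" loader; given a directory of
# class-named subfolders of audio files (path below is hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_audio_dataset")
#   example = ds["train"][0]  # {"audio": {...}, "label": ...}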
| 561
|
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
lowerCAmelCase :Any = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
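# Example (hypothetical) environment payloads that make the check above return True,
# assuming the `smdistributed` package is importable:
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'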
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.',
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info('PyTorch: setting up devices')
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch')
        if self.no_cuda:
            device = torch.device('cpu')
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda', local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta)
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 561
| 1
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the loss with respect to each weight matrix, via the chain rule.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f'''Iteration {iteration} Loss: {loss}''')

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Feed the new input through the trained network.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid activation function."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of sigmoid, expressed in terms of the sigmoid output `value`."""
    return (value) * (1 - (value))
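# Note: sigmoid_derivative takes the *sigmoid output* s, not the raw input x, since
# d/dx sigmoid(x) = s * (1 - s). Quick check:
#   s = sigmoid(numpy.array(0.0))   # -> 0.5
#   sigmoid_derivative(s)           # -> 0.25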
def example() -> int:
    # Input values (each row is a 3-bit pattern).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 364
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
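# Migration sketch: new code should construct the image processor directly, e.g.
#   from transformers import YolosImageProcessor
#   processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")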
| 364
| 1
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class snake_case__ :
def UpperCAmelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, thresholding=False, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', class_embed_type='timestep', mid_block_scale_factor=1.414, time_embedding_act_fn='gelu', time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, thresholding=False, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase__ ( self : int ):
snake_case__ : str = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__lowercase )
snake_case__ : str = inputs["""prompt"""]
snake_case__ : List[Any] = inputs["""generator"""]
snake_case__ : Optional[Any] = inputs["""num_inference_steps"""]
snake_case__ : int = inputs["""output_type"""]
if "image" in inputs:
snake_case__ : List[str] = inputs["""image"""]
else:
snake_case__ : Optional[Any] = None
if "mask_image" in inputs:
snake_case__ : Optional[int] = inputs["""mask_image"""]
else:
snake_case__ : str = None
if "original_image" in inputs:
snake_case__ : List[Any] = inputs["""original_image"""]
else:
snake_case__ : Optional[int] = None
snake_case__ : int = pipe.encode_prompt(__lowercase )
# inputs with prompt converted to embeddings
snake_case__ : List[Any] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
snake_case__ : Union[str, Any] = image
if mask_image is not None:
snake_case__ : Dict = mask_image
if original_image is not None:
snake_case__ : Tuple = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__lowercase , __lowercase , __lowercase )
snake_case__ : List[str] = pipe(**__lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowercase )
snake_case__ : Tuple = self.pipeline_class.from_pretrained(__lowercase )
pipe_loaded.to(__lowercase )
pipe_loaded.set_progress_bar_config(disable=__lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowercase , __lowercase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
snake_case__ : str = self.get_dummy_inputs(__lowercase )
snake_case__ : Dict = inputs["""generator"""]
snake_case__ : str = inputs["""num_inference_steps"""]
snake_case__ : Dict = inputs["""output_type"""]
# inputs with prompt converted to embeddings
snake_case__ : Dict = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
snake_case__ : List[str] = image
if mask_image is not None:
snake_case__ : List[str] = mask_image
if original_image is not None:
snake_case__ : Optional[Any] = original_image
snake_case__ : str = pipe_loaded(**__lowercase )[0]
snake_case__ : int = np.abs(to_np(__lowercase ) - to_np(__lowercase ) ).max()
self.assertLess(__lowercase , 1E-4 )
def UpperCAmelCase__ ( self : Dict ):
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : List[Any] = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__lowercase )
snake_case__ : Optional[Any] = pipe(**__lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowercase )
snake_case__ : Any = self.pipeline_class.from_pretrained(__lowercase )
pipe_loaded.to(__lowercase )
pipe_loaded.set_progress_bar_config(disable=__lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
snake_case__ : List[str] = self.get_dummy_inputs(__lowercase )
snake_case__ : Tuple = pipe_loaded(**__lowercase )[0]
snake_case__ : Optional[int] = np.abs(to_np(__lowercase ) - to_np(__lowercase ) ).max()
self.assertLess(__lowercase , 1E-4 )
| 170
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
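# A minimal end-to-end sketch with real weights (assumed checkpoint id and call
# parameters; not part of the test suite above):
#
#   from diffusers import StableDiffusionXLImg2ImgPipeline
#   pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-refiner-1.0"
#   )
#   out = pipe(prompt="a photo of a cat", image=init_image, strength=0.75)
#   out.images[0]  # the edited image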
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[Any] =DownBlockaD # noqa F405
__A : List[str] ="down"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[Any] =ResnetDownsampleBlockaD # noqa F405
__A : Optional[Any] ="down"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Dict =AttnDownBlockaD # noqa F405
__A : List[str] ="down"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : str =CrossAttnDownBlockaD # noqa F405
__A : Any ="down"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Tuple =SimpleCrossAttnDownBlockaD # noqa F405
__A : List[str] ="down"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : int = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" ,"MPS result is not consistent" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =SkipDownBlockaD # noqa F405
__A : Any ="down"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_skip_sample=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : List[str] =AttnSkipDownBlockaD # noqa F405
__A : int ="down"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_skip_sample=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =DownEncoderBlockaD # noqa F405
__A : Dict ="down"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_temb=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = {
"in_channels": 32,
"out_channels": 32,
}
UpperCAmelCase_ : Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : List[Any] =AttnDownEncoderBlockaD # noqa F405
__A : Dict ="down"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_temb=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = {
"in_channels": 32,
"out_channels": 32,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : str =UNetMidBlockaD # noqa F405
__A : Any ="mid"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = {
"in_channels": 32,
"temb_channels": 1_28,
}
UpperCAmelCase_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : List[str] =UNetMidBlockaDCrossAttn # noqa F405
__A : int ="mid"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Dict =UNetMidBlockaDSimpleCrossAttn # noqa F405
__A : int ="mid"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Any = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =UpBlockaD # noqa F405
__A : int ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[Any] =ResnetUpsampleBlockaD # noqa F405
__A : int ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =CrossAttnUpBlockaD # noqa F405
__A : List[str] ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Dict = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : str =SimpleCrossAttnUpBlockaD # noqa F405
__A : Tuple ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case ,include_encoder_hidden_states=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : str =AttnUpBlockaD # noqa F405
__A : int ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
@unittest.skipIf(torch_device == "mps" ,"MPS result is not consistent" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Tuple =SkipUpBlockaD # noqa F405
__A : Dict ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Dict =AttnSkipUpBlockaD # noqa F405
__A : List[str] ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =UpDecoderBlockaD # noqa F405
__A : Dict ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_temb=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = {"in_channels": 32, "out_channels": 32}
UpperCAmelCase_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =AttnUpDecoderBlockaD # noqa F405
__A : Union[str, Any] ="up"
@property
def UpperCamelCase__ ( self ):
return super().get_dummy_input(include_temb=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = {"in_channels": 32, "out_channels": 32}
UpperCAmelCase_ : Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_snake_case )
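# A minimal standalone sketch of exercising one block directly, outside the
# tester mixin (shapes are assumed; DownBlock2D is the diffusers block the
# "DownBlockaD" alias above stands in for):
#
#   import torch
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
#   sample = torch.randn(4, 32, 32, 32)  # (batch, channels, height, width)
#   temb = torch.randn(4, 128)           # timestep embedding
#   hidden_states, output_states = block(sample, temb)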
"""simple docstring"""
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree) with O(log n) point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        """Initialize either from an existing array or as all zeros of a given size."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from an input array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the original array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value to the element at index in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at index to value in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum of arr[0:right] in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum of arr[left:right] in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Return the element at index in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Return the largest index whose prefix sum does not exceed value, or -1."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
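# A quick usage sketch (values are illustrative, not from the original file):
#
#   f = FenwickTree(arr=[1, 2, 3, 4, 5])
#   f.prefix(3)    # -> 6, the sum 1 + 2 + 3
#   f.add(1, 10)   # the represented array is now [1, 12, 3, 4, 5]
#   f.query(1, 4)  # -> 19, the sum 12 + 3 + 4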
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
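# For reference, the provider tuple above is the standard onnxruntime form for
# per-provider options (a sketch with an assumed model path):
#
#   import onnxruntime as ort
#   sess = ort.InferenceSession(
#       "model.onnx",
#       providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})],
#   )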
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after DetaImageProcessor resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
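# A minimal usage sketch outside the test harness (assumed local image path;
# the processor's default size resizes the shortest edge to 800, capped at 1333):
#
#   from PIL import Image
#   from transformers import DetaImageProcessor
#
#   processor = DetaImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # (1, 3, H, W) after resize + pad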
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on the reversed equation, then reverse the result
    return (infix_2_postfix("".join(infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
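# Worked example (table output omitted), matching the functions above:
#
#   infix_2_postfix("a+b*(c^d-e)")  ->  "abcd^e-*+"
#   infix_2_prefix("a+b*(c^d-e)")   ->  "+a*b-^cde"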
"""A simple genetic algorithm that evolves a random string toward a target string."""
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by the number of positions that already match the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
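# A quick smoke-test sketch (illustrative values, not from the original file):
#
#   gen, total, best = basic("cat", list("abcdefghijklmnopqrstuvwxyz"), debug=False)
#   assert best == "cat"  # tiny targets converge in a handful of generations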
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
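# Lazy-module pattern note: names listed in _import_structure are only imported
# on first attribute access, which keeps importing the top-level package cheap.
# A sketch of what that access looks like (assumed package path):
#
#   from transformers import MCTCTConfig  # resolved through the lazy module above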
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_input_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
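# Quick illustrative checks (an addition): the helper above is a
# Manacher-style longest-palindromic-substring search.
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"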
| 165
| 0
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class lowercase_ ( PretrainedConfig ):
    '''simple docstring'''
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__( self , vocab_size=21_128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) ->Any:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
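# Illustrative usage sketch (an addition): with the repaired signature the
# config builds like any PretrainedConfig subclass; the class keeps the
# snippet's obfuscated name `lowercase_`.
nezha_config = lowercase_(vocab_size=21_128, max_relative_position=64)
print(nezha_config.model_type, nezha_config.hidden_size)  # nezha 768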
| 117
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase_ :
'''simple docstring'''
pass
| 117
| 1
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 577
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.', )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        '''simple docstring'''
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        '''simple docstring'''
        return self._images
    @property
    def labels(self):
        '''simple docstring'''
        return self._labels
    @property
    def num_examples(self):
        '''simple docstring'''
        return self._num_examples
    @property
    def epochs_completed(self):
        '''simple docstring'''
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        '''simple docstring'''
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(
    None, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')')
def read_data_sets(
    train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5_000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            f'''{len(train_images)}. Received: {validation_size}.'''
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
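# Illustrative usage (an addition): the first call downloads roughly 11 MB of
# MNIST archives into the given directory; the path below is a placeholder.
#
#     mnist = read_data_sets('/tmp/mnist_data', one_hot=True, validation_size=5_000)
#     images, labels = mnist.train.next_batch(32)  # shapes (32, 784) and (32, 10)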
| 577
| 1
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
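# Hypothetical invocations (an addition; all paths are placeholders):
#
#     python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#         --hf_config facebook/mbart-large-cc25 --finetuned
#
# or, calling the converter directly:
#
#     model = convert_fairseq_mbart_checkpoint_from_disk(
#         "/path/to/model.pt", hf_config_path="facebook/mbart-large-cc25", finetuned=True
#     )
#     model.save_pretrained("./mbart-hf")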
| 25
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
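# Illustrative standalone sketch (an addition): the same inpainting call the
# tests above exercise, but on the CPU provider; the checkpoint and image URLs
# come from the tests, everything else is an assumption.
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider"
)
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
result = pipe(
    prompt="A red cat sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
)
print(result.images.shape)  # (1, 512, 512, 3)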
| 25
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str=10_24 , __UpperCamelCase : int=10_24 , __UpperCamelCase : Optional[int]=False , **__UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
A__ : Tuple = SeqaSeqDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , type_path='''train''' , **__UpperCamelCase )
A__ : Any = tok.pad_token_id
def get_lens(__UpperCamelCase : Optional[Any] ):
A__ : int = tqdm(
DataLoader(__UpperCamelCase , batch_size=5_12 , num_workers=8 , shuffle=__UpperCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
A__ : List[str] = []
for batch in dl:
A__ : str = batch['''input_ids'''].ne(__UpperCamelCase ).sum(1 ).tolist()
A__ : Any = batch['''labels'''].ne(__UpperCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__UpperCamelCase , __UpperCamelCase ):
max_lens.append(max(__UpperCamelCase , __UpperCamelCase ) )
else:
max_lens.extend(__UpperCamelCase )
return max_lens
A__ : Dict = get_lens(__UpperCamelCase )
A__ : Tuple = SeqaSeqDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , type_path='''val''' , **__UpperCamelCase )
A__ : Any = get_lens(__UpperCamelCase )
pickle_save(__UpperCamelCase , train_ds.len_file )
pickle_save(__UpperCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
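# Hypothetical fire invocation (an addition): argument names follow the
# upstream seq2seq example's signature (tokenizer_name, data_dir, ...), which
# the obfuscated parameters above stand for; paths are placeholders.
#
#     python save_len_file.py facebook/bart-large ./wmt_en_ro \
#         --max_source_length 1024 --max_target_length 1024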
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    """simple docstring"""
    return unittest.skip('''Test was skipped''' )(test_case)
def slow(test_case):
    """simple docstring"""
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(test_case)
def require_cpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(test_case)
def require_cuda(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(test_case)
def require_xpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(test_case)
def require_mps(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(test_case)
def require_huggingface_suite(test_case):
    """simple docstring"""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(test_case)
def require_bnb(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(test_case)
def require_tpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(test_case)
def require_single_gpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(test_case)
def require_single_xpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(test_case)
def require_multi_gpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(test_case)
def require_multi_xpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(test_case)
def require_safetensors(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(test_case)
def require_deepspeed(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(test_case)
def require_fsdp(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(test_case)
def require_torch_min_version(test_case=None , version=None):
    """simple docstring"""
    if test_case is None:
        return partial(require_torch_min_version , version=version)
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , F"test requires torch version >= {version}" )(test_case)
def require_tensorboard(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(test_case)
def require_wandb(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(test_case)
def require_comet_ml(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class TempDirTestCase(unittest.TestCase):
    '''simple docstring'''
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    '''simple docstring'''
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """simple docstring"""
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensors[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    '''simple docstring'''
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """simple docstring"""
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l, out, sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l, err, sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
class SubprocessCallException(Exception):
    '''simple docstring'''
    pass
def run_command(command, return_stdout=False):
    """simple docstring"""
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """simple docstring"""
    a : int = 0
    b : bool = False
    c : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def snake_case__ ( self : int ) -> Optional[int]:
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
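# Minimal sketch (an addition) of the KwargsHandler pattern the first test
# checks: a dataclass subclassing KwargsHandler exports only its non-default
# fields via to_kwargs().
@dataclass
class ExampleKwargs(KwargsHandler):
    a: int = 0
    b: bool = False

assert ExampleKwargs(a=2).to_kwargs() == {"a": 2}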
| 98
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def snake_case__ ( self : Optional[int] ) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase__ )
_UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ )
_UpperCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5
).float()
_UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long()
_UpperCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case__ ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCamelCase = self.num_queries
_UpperCamelCase = self.num_labels
_UpperCamelCase = [1, 1, 1, 1]
_UpperCamelCase = self.num_channels
_UpperCamelCase = 64
_UpperCamelCase = 128
_UpperCamelCase = self.hidden_dim
_UpperCamelCase = self.hidden_dim
_UpperCamelCase = self.hidden_dim
return config
def snake_case__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = output.encoder_hidden_states
_UpperCamelCase = output.pixel_decoder_hidden_states
_UpperCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_layers )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
_UpperCamelCase = MaskaFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ : int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCamelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
_UpperCamelCase = model(
pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_snake_case : List[Any] = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
_snake_case : Any = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
_snake_case : int = False
def snake_case__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = MaskaFormerModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def snake_case__ ( self : int ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : int ) -> List[str]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def snake_case__ ( self : List[str] ) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def snake_case__ ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def snake_case__ ( self : int ) -> List[str]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def snake_case__ ( self : List[Any] ) -> str:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCamelCase = MaskaFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = (self.model_tester.min_size,) * 2
_UpperCamelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowerCAmelCase__ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowerCAmelCase__ ).long(),
}
_UpperCamelCase = self.model_tester.get_config()
_UpperCamelCase = MaskaFormerForUniversalSegmentation(lowerCAmelCase__ ).to(lowerCAmelCase__ )
_UpperCamelCase = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
_UpperCamelCase = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def snake_case__ ( self : Tuple ) -> Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCamelCase = self.all_model_classes[1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
_UpperCamelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.all_model_classes[1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
model.train()
_UpperCamelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
_UpperCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ : Tuple = 1E-4
def a__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : List[str] ) -> Any:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
_UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
_UpperCamelCase = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
_UpperCamelCase = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
_UpperCamelCase = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def snake_case__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
_UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
# masks_queries_logits
_UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCamelCase = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_UpperCamelCase = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
_UpperCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCamelCase = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
_UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
_UpperCamelCase = inputs['''pixel_values'''].to(lowerCAmelCase__ )
_UpperCamelCase = [el.to(lowerCAmelCase__ ) for el in inputs['''mask_labels''']]
_UpperCamelCase = [el.to(lowerCAmelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 98
| 1
|
'''simple docstring'''
def binary_insertion_sort(collection):
    """simple docstring"""
    n = len(collection)
    for i in range(1 , n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
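# Quick illustrative checks (an addition) of the sort above:
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort(['d', 'a', 'c', 'b']) == ['a', 'b', 'c', 'd']
assert binary_insertion_sort([]) == []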
| 702
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128)
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
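# A minimal sketch of the classifier-free guidance update used in `__call__`
# above (relies on the module-level `torch` import; the tensor shapes are
# illustrative only, not the pipeline's real sizes).
def _cfg_update_sketch(guidance_scale=4.0):
    noise_pred = torch.randn(2, 1024, 1024)  # [uncond; cond] stacked along dim 0
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
    assert guided.shape == noise_pred_uncond.shape
    return guided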
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }),
            "id": datasets.Value("int64"),
        })
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        }, features=features)
    return dataset
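# A usage sketch (an assumed pytest consumer, not part of the original
# fixtures): requesting the `dataset` fixture by name yields the 10-row
# dataset built above.
def _dataset_fixture_sketch(dataset):
    assert dataset.num_rows == 10
    assert dataset[0]["tokens"] == ["foo"] * 5
    assert dataset.features["labels"].feature.names == ["negative", "positive"]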
@pytest.fixture(scope="session" )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
lowerCAmelCase_ : Union[str, Any] = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt"
_UpperCamelCase : Any = FILE_CONTENT
with open(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase )
return filename
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> str:
'''simple docstring'''
    import bz2
_UpperCamelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_UpperCamelCase : Tuple = bytes(UpperCAmelCase ,"utf-8" )
    with bz2.open(UpperCAmelCase ,"wb" ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
import gzip
_UpperCamelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_UpperCamelCase : Any = bytes(UpperCAmelCase ,"utf-8" )
with gzip.open(UpperCAmelCase ,"wb" ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Dict:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_UpperCamelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_UpperCamelCase : Optional[int] = bytes(UpperCAmelCase ,"utf-8" )
        with lz4.frame.open(UpperCAmelCase ,"wb" ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_UpperCamelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(UpperCAmelCase ,"w" ) as archive:
archive.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> int:
'''simple docstring'''
import tarfile
_UpperCamelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(UpperCAmelCase ,"w" ) as f:
f.add(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Dict:
'''simple docstring'''
import lzma
_UpperCamelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_UpperCamelCase : Optional[int] = bytes(UpperCAmelCase ,"utf-8" )
with lzma.open(UpperCAmelCase ,"wb" ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> str:
'''simple docstring'''
import zipfile
_UpperCamelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Any:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCamelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_UpperCamelCase : Dict = bytes(UpperCAmelCase ,"utf-8" )
with zstd.open(UpperCAmelCase ,"wb" ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.xml"
_UpperCamelCase : str = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session" )
def __A ( ) -> Dict:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = datasets.Dataset.from_dict(UpperCAmelCase )
_UpperCamelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(UpperCAmelCase ) ) as con:
_UpperCamelCase : List[Any] = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" ,tuple(item.values() ) )
con.commit()
return path
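# A short verification sketch (not part of the original fixtures): the rows
# written above can be read back with the standard-library sqlite3 module.
def _read_sqlite_rows(sqlite_path):
    # returns e.g. [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)]
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        return con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()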
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Any:
'''simple docstring'''
_UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(UpperCAmelCase ,"w" ,newline="" ) as f:
_UpperCamelCase : str = csv.DictWriter(UpperCAmelCase ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> int:
'''simple docstring'''
_UpperCamelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(UpperCAmelCase ,"w" ,newline="" ) as f:
_UpperCamelCase : Dict = csv.DictWriter(UpperCAmelCase ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> str:
'''simple docstring'''
    import bz2
_UpperCamelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(UpperCAmelCase ,"rb" ) as f:
_UpperCamelCase : str = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(UpperCAmelCase ,"wb" ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.basename(csv_path.replace(".csv" ,".CSV" ) ) )
f.write(UpperCAmelCase ,arcname=os.path.basename(csva_path.replace(".csv" ,".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : int = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.join("main_dir" ,os.path.basename(UpperCAmelCase ) ) )
f.write(UpperCAmelCase ,arcname=os.path.join("main_dir" ,os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_UpperCamelCase : Union[str, Any] = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(UpperCAmelCase ,"wb" ) as f:
_UpperCamelCase : Optional[Any] = pq.ParquetWriter(UpperCAmelCase ,schema=UpperCAmelCase )
_UpperCamelCase : str = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCAmelCase ) )] for k in DATA[0]} ,schema=UpperCAmelCase )
writer.write_table(UpperCAmelCase )
writer.close()
return path
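# A small read-back sketch (illustrative, not part of the original fixtures):
# the parquet file written above round-trips through pyarrow with the same
# three-column schema declared in the fixture.
def _read_parquet_table(parquet_path):
    table = pq.read_table(parquet_path)
    assert table.schema.names == ["col_1", "col_2", "col_3"]
    return table.to_pydict()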
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> int:
'''simple docstring'''
_UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCamelCase : Optional[Any] = {"data": DATA}
with open(UpperCAmelCase ,"w" ) as f:
json.dump(UpperCAmelCase ,UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCamelCase : Dict = {"data": DATA_DICT_OF_LISTS}
with open(UpperCAmelCase ,"w" ) as f:
json.dump(UpperCAmelCase ,UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Any:
'''simple docstring'''
_UpperCamelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(UpperCAmelCase ,"w" ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(UpperCAmelCase ,"w" ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(UpperCAmelCase ,"w" ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCAmelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(UpperCAmelCase ,"w" ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCAmelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> Any:
'''simple docstring'''
import gzip
_UpperCamelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(UpperCAmelCase ,"rb" ) as orig_file:
with gzip.open(UpperCAmelCase ,"wb" ) as zipped_file:
zipped_file.writelines(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> Dict:
'''simple docstring'''
import gzip
_UpperCamelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(UpperCAmelCase ,"rb" ) as orig_file:
with gzip.open(UpperCAmelCase ,"wb" ) as zipped_file:
zipped_file.writelines(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.join("nested" ,os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.join("main_dir" ,os.path.basename(UpperCAmelCase ) ) )
f.write(UpperCAmelCase ,arcname=os.path.join("main_dir" ,os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(UpperCAmelCase ,"w" ) as f:
f.add(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
f.add(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Any:
'''simple docstring'''
_UpperCamelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(UpperCAmelCase ,"w" ) as f:
f.add(UpperCAmelCase ,arcname=os.path.join("nested" ,os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Dict:
'''simple docstring'''
_UpperCamelCase : List[str] = ["0", "1", "2", "3"]
_UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(UpperCAmelCase ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> str:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = ["0", "1", "2", "3"]
_UpperCamelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(UpperCAmelCase ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : List[Any] = ["0", "1", "2", "3"]
_UpperCamelCase : List[str] = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(UpperCAmelCase ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : List[str] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.join("main_dir" ,os.path.basename(UpperCAmelCase ) ) )
f.write(UpperCAmelCase ,arcname=os.path.join("main_dir" ,os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.basename("unsupported.ext" ) )
f.write(UpperCAmelCase ,arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_UpperCamelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(UpperCAmelCase ,"w" ,encoding="utf-8" ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope="session" )
def __A ( ) -> Dict:
'''simple docstring'''
return os.path.join("tests" ,"features" ,"data" ,"test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __A ( ) -> List[Any]:
'''simple docstring'''
return os.path.join("tests" ,"features" ,"data" ,"test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> int:
'''simple docstring'''
_UpperCamelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(UpperCAmelCase ,"w" ) as f:
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase ,arcname=os.path.basename(UpperCAmelCase ).replace(".jpg" ,"2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __A ( UpperCAmelCase ) -> int:
'''simple docstring'''
_UpperCamelCase : Any = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 1_0 )
with open(data_dir / "subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 1_0 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" ,"w" ) as f:
f.write("bar\n" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 1_0 )
with open(data_dir / ".subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 1_0 )
return data_dir
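# A minimal illustration of why the fixture above plants hidden files and
# directories: data discovery is expected to skip dot-prefixed path components.
# This filter sketches that convention; it is not the datasets implementation.
def _visible_text_files(data_dir):
    import pathlib
    return [
        p
        for p in pathlib.Path(data_dir).rglob("*.txt")
        if not any(part.startswith(".") for part in p.relative_to(data_dir).parts)
    ]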
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))
    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            header_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=header_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=header_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))
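# A worked version of the `expected_hidden_dim` arithmetic used throughout the
# tests above: each flattened patch carries height * width * channels pixel
# values plus 2 extra slots for the patch's row and column indices.
def _expected_hidden_dim(patch_size, num_channels):
    return patch_size["height"] * patch_size["width"] * num_channels + 2

assert _expected_hidden_dim({"height": 16, "width": 16}, 3) == 770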
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
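# Quick illustrations (not part of the original script) of what
# rename_state_dict_key does to typical ParlAI keys.
assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert rename_state_dict_key("encoder.norm1.weight") == "encoder.self_attn_layer_norm.weight"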
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
_A = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
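# Illustrative invocation (the script filename and paths below are placeholders,
# not files shipped with this snippet):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json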
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s):
    """Return the product of the digits of the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n=N):
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
def cramers_rule_2x2(equation1, equation2):
    '''simple docstring'''
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
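# Example usage (illustrative): solving x + 2y = 3 and 2x + y = 3, a system with
# the unique solution x = 1, y = 1.
assert cramers_rule_2x2((1, 2, 3), (2, 1, 3)) == (1.0, 1.0)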
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True)
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
        pass
@slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
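# A standalone sketch of the numeric comparison used in the integration test
# above: tf.debugging.assert_near raises when two tensors differ by more than
# the tolerance. The values below are dummies, not model outputs.
def _assert_near_sketch():
    a = tf.constant([[0.2356, 0.1948, 0.0369]])
    b = a + 5e-5
    tf.debugging.assert_near(a, b, atol=1e-4)  # within tolerance, so no error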
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head) -> Optional[int]:
UpperCAmelCase__ : str = FairseqRobertaModel.from_pretrained(snake_case_ )
roberta.eval() # disable dropout
UpperCAmelCase__ : Tuple = roberta.model.encoder.sentence_encoder
UpperCAmelCase__ : str = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
UpperCAmelCase__ : Optional[int] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , snake_case_ )
UpperCAmelCase__ : List[str] = XLMRobertaXLForSequenceClassification(snake_case_ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase__ : Any = roberta_sent_encoder.embed_tokens.weight
UpperCAmelCase__ : Optional[int] = roberta_sent_encoder.embed_positions.weight
UpperCAmelCase__ : List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCAmelCase__ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
UpperCAmelCase__ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase__ : BertLayer = model.roberta.encoder.layer[i]
UpperCAmelCase__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
UpperCAmelCase__ : RobertaAttention = layer.attention
UpperCAmelCase__ : Union[str, Any] = roberta_layer.self_attn_layer_norm.weight
UpperCAmelCase__ : int = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCAmelCase__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCAmelCase__ : Optional[Any] = roberta_layer.self_attn.q_proj.weight
UpperCAmelCase__ : Optional[Any] = roberta_layer.self_attn.q_proj.bias
UpperCAmelCase__ : List[str] = roberta_layer.self_attn.k_proj.weight
UpperCAmelCase__ : str = roberta_layer.self_attn.k_proj.bias
UpperCAmelCase__ : int = roberta_layer.self_attn.v_proj.weight
UpperCAmelCase__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCAmelCase__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
UpperCAmelCase__ : Any = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCAmelCase__ : Any = roberta_layer.final_layer_norm.weight
UpperCAmelCase__ : List[Any] = roberta_layer.final_layer_norm.bias
# intermediate
UpperCAmelCase__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase__ : Dict = roberta_layer.fca.weight
UpperCAmelCase__ : Any = roberta_layer.fca.bias
# output
UpperCAmelCase__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase__ : Dict = roberta_layer.fca.weight
UpperCAmelCase__ : Union[str, Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
UpperCAmelCase__ : Any = roberta.model.classification_heads["mnli"].dense.weight
UpperCAmelCase__ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
UpperCAmelCase__ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
UpperCAmelCase__ : Union[str, Any] = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCAmelCase__ : Tuple = roberta.model.encoder.lm_head.dense.weight
UpperCAmelCase__ : Any = roberta.model.encoder.lm_head.dense.bias
UpperCAmelCase__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase__ : List[Any] = roberta.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase__ : int = roberta.model.encoder.lm_head.weight
UpperCAmelCase__ : Any = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase__ : torch.Tensor = roberta.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
UpperCAmelCase__ : Any = model(snake_case_ )[0]
if classification_head:
UpperCAmelCase__ : Tuple = roberta.model.classification_heads["mnli"](roberta.extract_features(snake_case_ ) )
else:
UpperCAmelCase__ : Optional[Any] = roberta.model(snake_case_ )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase__ : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
UpperCAmelCase__ : Optional[Any] = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
UpperCamelCase__ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
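# A standalone sketch (dummy tensors, not the real model outputs) of the
# verification pattern above: a conversion is accepted when the element-wise
# difference between the two implementations stays below an absolute tolerance.
def _outputs_match(our_output, their_output, atol=1e-3):
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    return torch.allclose(our_output, their_output, atol=atol), max_absolute_diff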
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
UpperCamelCase__ = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Union[str, Any] , _A : np.ndarray , _A : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
return flip_channel_order(_A , data_format=_A )
def lowercase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : List[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Any = resample if resample is not None else self.resample
UpperCAmelCase__ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Tuple = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCAmelCase__ : Dict = size if size is not None else self.size
UpperCAmelCase__ : Any = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : List[Any] = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Union[str, Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : List[str] = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : int = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Any = [self.rescale(image=_A , scale=_A ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCAmelCase__ : Optional[Any] = [self.flip_channel_order(image=_A ) for image in images]
UpperCAmelCase__ : Any = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : List[str] = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def lowercase_ ( self : Dict , _A : Dict , _A : List[Tuple] = None ):
'''simple docstring'''
UpperCAmelCase__ : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
UpperCAmelCase__ : int = target_sizes.numpy()
UpperCAmelCase__ : Any = []
for idx in range(len(_A ) ):
UpperCAmelCase__ : List[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
UpperCAmelCase__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
UpperCAmelCase__ : Optional[Any] = logits.argmax(dim=1 )
UpperCAmelCase__ : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
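# A minimal usage sketch (assuming this processor is exposed as
# `MobileViTImageProcessor`; the image file name is a placeholder):
#   from PIL import Image
#   processor = MobileViTImageProcessor()
#   inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # resized, center-cropped, rescaled, RGB->BGR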
| 312
| 0
|
"""simple docstring"""
def and_gate(input_a: int, input_b: int) -> int:
    # AND outputs 1 only when neither input is 0
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 76
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self) -> None:
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self) -> None:
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self) -> None:
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self) -> int:
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 76
| 1
|
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Dict:
A_ = inspect.getfile(accelerate.test_utils )
A_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
A_ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __A ( self ) -> int:
A_ = F'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
A_ = [sys.executable] + distributed_args
execute_subprocess_async(lowerCamelCase_ , env=os.environ.copy() )
| 710
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the alphabetically sorted letters of a word; anagrams share one."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return all words from the word list that are anagrams of my_word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
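# Example lookups (assuming "pots", "stop" and "tops" all occur in words.txt):
#   signature("stop")  -> "opst"
#   anagram("pots")    -> every cached word whose sorted letters are "opst"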
| 174
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE__ : Optional[int] = tf_top_k_top_p_filtering(_lowercase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output[output != -float('''inf''' )]
SCREAMING_SNAKE_CASE__ : Dict = tf.cast(
tf.where(tf.not_equal(_lowercase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-12 )
tf.debugging.assert_equal(_lowercase , _lowercase )
@require_tf
class lowercase ( unittest.TestCase , _UpperCAmelCase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowerCamelCase : Optional[int] = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : Any ):
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Dict = 2
class lowercase ( tf.Module ):
def __init__( self : List[str] , _lowercase : List[Any] ):
super(_lowercase , self ).__init__()
SCREAMING_SNAKE_CASE__ : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_lowercase , )
def lowercase__ ( self : List[str] , _lowercase : Tuple , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : str = self.model.generate(
input_ids=_lowercase , attention_mask=_lowercase , max_new_tokens=_lowercase , return_dict_in_generate=_lowercase , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Any = [[2, 0], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : Any = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE__ : List[Any] = DummyModel(model=_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowercase , _lowercase , signatures={'''serving_default''': dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : str = tf.saved_model.load(_lowercase ).signatures['''serving_default''']
for batch_size in range(1 , len(_lowercase ) + 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE__ : Optional[int] = serving_func(**_lowercase )['''sequences''']
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**_lowercase , max_new_tokens=_lowercase )
tf.debugging.assert_equal(_lowercase , _lowercase )
@slow
def lowercase__ ( self : Optional[Any] ):
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Tuple = 2
class lowercase ( tf.Module ):
def __init__( self : Union[str, Any] , _lowercase : int ):
super(_lowercase , self ).__init__()
SCREAMING_SNAKE_CASE__ : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_lowercase , )
def lowercase__ ( self : Dict , _lowercase : Tuple , _lowercase : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Any = self.model.generate(
input_ids=_lowercase , attention_mask=_lowercase , max_new_tokens=_lowercase , return_dict_in_generate=_lowercase , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[2], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : int = [[1], [1, 1]]
SCREAMING_SNAKE_CASE__ : Any = DummyModel(model=_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowercase , _lowercase , signatures={'''serving_default''': dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.saved_model.load(_lowercase ).signatures['''serving_default''']
for input_row in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : str = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE__ : List[str] = serving_func(**_lowercase )['''sequences''']
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**_lowercase , max_new_tokens=_lowercase )
tf.debugging.assert_equal(_lowercase , _lowercase )
@slow
@require_tensorflow_text
def lowercase__ ( self : Optional[int] ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_lowercase )
class lowercase ( tf.keras.layers.Layer ):
def __init__( self : Any ):
super().__init__()
SCREAMING_SNAKE_CASE__ : int = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_lowercase , '''spiece.model''' ) , '''rb''' ).read() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowercase__ ( self : List[str] , _lowercase : Optional[Any] , *_lowercase : Tuple , **_lowercase : str ):
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.tokenize(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = text.pad_model_inputs(
_lowercase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase )
return self.tokenizer.detokenize(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE__ : str = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
SCREAMING_SNAKE_CASE__ : Dict = complete_model(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.Model(_lowercase , _lowercase )
keras_model.save(_lowercase )
def lowercase__ ( self : str ):
# Has PT equivalent: this test relies on random sampling
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
SCREAMING_SNAKE_CASE__ : Any = 14
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''Hello, my dog is cute and'''
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_lowercase , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : List[str] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(**_lowercase , eos_token_id=_lowercase , **_lowercase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE__ : List[str] = [6_38, 1_98]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(**_lowercase , eos_token_id=_lowercase , **_lowercase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Optional[int] ):
# Has PT equivalent: ample use of framework-specific code
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE__ : List[Any] = '''Hugging Face is a technology company based in New York and Paris.'''
SCREAMING_SNAKE_CASE__ : Optional[int] = bart_tokenizer(_lowercase , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE__ : str = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE__ : str = bart_model.generate(_lowercase ).numpy()
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[int] , _lowercase : List[str]=None , **_lowercase : Union[str, Any] ):
return super().call(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE__ : List[Any] = bart_model.generate(_lowercase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_lowercase , _lowercase ) )
class lowercase ( bart_model.model.encoder.__class__ ):
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] , **_lowercase : List[str] ):
return super().call(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE__ : List[Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE__ : Dict = bart_model.generate(_lowercase ).numpy()
with self.assertRaises(_lowercase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_lowercase , foo='''bar''' )
| 35
|
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """
    Project Euler 44: find the smallest difference D = P_j - P_i between two
    pentagonal numbers whose sum and difference are both pentagonal.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
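# Worked check: the pentagonal numbers start 1, 5, 12, 22, 35, and indeed
# is_pentagonal(22) is True (sqrt(1 + 24*22) = 23 and (1 + 23) / 6 = 4) while
# is_pentagonal(23) is False. The published Project Euler 44 answer, which
# solution() should return, is 5482660.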
| 164
| 0
|
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort in place by comparing each pair and swapping out-of-order elements."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
UpperCAmelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
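# Example: exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5]. Like bubble
# sort, the nested loops cost O(n^2) comparisons regardless of input order.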
| 555
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 555
| 1
|
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Compute the transmitted intensity I = I0 * cos^2(theta) for a polarizer."""
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
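# Worked example: a polarizer at 45 degrees passes cos^2(45°) = 0.5 of the
# light, so malus_law(100.0, 45.0) ≈ 50.0.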
| 36
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Return True if the pattern occurs in the text, using rolling hashes so each
    text window is compared in (amortized) constant time.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = '''abc1abc12'''
    text_a = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text_b = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern, text)
    pattern = '''Lue'''
    assert not rabin_karp(pattern, text)
    print('''Success.''')
if __name__ == "__main__":
test_rabin_karp()
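# Complexity sketch: hashing the pattern costs O(len(pattern)) and each window
# update is O(1), so the expected cost is O(len(text) + len(pattern)); the full
# string comparison is only re-run when the hashes collide. Quick examples:
#   rabin_karp("abc", "xxabcxx")  # True
#   rabin_karp("abd", "xxabcxx")  # False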
| 604
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] =(UniPCMultistepScheduler,)
a_ : Tuple =(("""num_inference_steps""", 25),)
def UpperCamelCase_ ( self : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**UpperCamelCase )
return config
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : str=0 , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Tuple = dict(self.forward_default_kwargs )
_snake_case : Optional[int] = kwargs.pop('num_inference_steps' , UpperCamelCase )
_snake_case : str = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**UpperCamelCase )
_snake_case : List[Any] = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
_snake_case : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
_snake_case : Optional[Any] = scheduler_class.from_pretrained(UpperCamelCase )
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
_snake_case : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case , _snake_case : Optional[int] = sample, sample
for t in range(UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_snake_case : int = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
_snake_case : List[Any] = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : Any , UpperCamelCase : int=0 , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop('num_inference_steps' , UpperCamelCase )
_snake_case : int = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : List[Any] = self.get_scheduler_config()
_snake_case : List[str] = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
_snake_case : List[str] = scheduler_class.from_pretrained(UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : Dict = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
_snake_case : Tuple = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if scheduler is None:
_snake_case : List[Any] = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config(**UpperCamelCase )
_snake_case : Optional[int] = scheduler_class(**UpperCamelCase )
_snake_case : int = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**UpperCamelCase )
_snake_case : Any = scheduler_class(**UpperCamelCase )
_snake_case : List[Any] = 10
_snake_case : Optional[int] = self.dummy_model()
_snake_case : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(UpperCamelCase , UpperCamelCase )
_snake_case : Optional[int] = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
return sample
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = dict(self.forward_default_kwargs )
_snake_case : Optional[int] = kwargs.pop('num_inference_steps' , UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_snake_case : str = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**UpperCamelCase )
_snake_case : Union[str, Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase , 'set_timesteps' ):
scheduler.set_timesteps(UpperCamelCase )
elif num_inference_steps is not None and not hasattr(UpperCamelCase , 'set_timesteps' ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case : int = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case : Union[str, Any] = scheduler.timesteps[5]
_snake_case : str = scheduler.timesteps[6]
_snake_case : Any = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
_snake_case : Any = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = UniPCMultistepScheduler(**self.get_scheduler_config() )
_snake_case : Optional[int] = self.full_loop(scheduler=UpperCamelCase )
_snake_case : Tuple = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
_snake_case : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_snake_case : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
_snake_case : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_snake_case : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_snake_case : int = self.full_loop(scheduler=UpperCamelCase )
_snake_case : List[Any] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase , prediction_type=UpperCamelCase , sample_max_value=UpperCamelCase , solver_order=UpperCamelCase , solver_type=UpperCamelCase , )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase , solver_type=UpperCamelCase , prediction_type=UpperCamelCase , )
_snake_case : Optional[int] = self.full_loop(
solver_order=UpperCamelCase , solver_type=UpperCamelCase , prediction_type=UpperCamelCase , )
assert not torch.isnan(UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=UpperCamelCase )
self.check_over_configs(lower_order_final=UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=UpperCamelCase , time_step=0 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = self.full_loop()
_snake_case : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.full_loop(prediction_type='v_prediction' )
_snake_case : Dict = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 0.10_14 ) < 1e-3
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Tuple = self.scheduler_classes[0]
_snake_case : int = self.get_scheduler_config(thresholding=UpperCamelCase , dynamic_thresholding_ratio=0 )
_snake_case : int = scheduler_class(**UpperCamelCase )
_snake_case : Dict = 10
_snake_case : int = self.dummy_model()
_snake_case : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Dict = model(UpperCamelCase , UpperCamelCase )
_snake_case : int = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCamelCase_ ( self : int , **UpperCamelCase : List[str] ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_snake_case : str = self.get_scheduler_config(**UpperCamelCase )
_snake_case : Dict = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 669
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
| 669
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
_lowerCamelCase : int = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_lowerCamelCase : int = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class __snake_case (_a ):
lowerCAmelCase__ = "whisper"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , _UpperCAmelCase : List[Any]=5_1865 , _UpperCAmelCase : str=80 , _UpperCAmelCase : Dict=6 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Tuple=6 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Dict=1536 , _UpperCAmelCase : Union[str, Any]=1536 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Any=5_0257 , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : str=256 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : str=False , _UpperCAmelCase : Union[str, Any]=1500 , _UpperCAmelCase : Tuple=448 , _UpperCAmelCase : int=5_0256 , _UpperCAmelCase : int=5_0256 , _UpperCAmelCase : Union[str, Any]=5_0256 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Dict=[220, 5_0256] , _UpperCAmelCase : str=False , _UpperCAmelCase : Union[str, Any]=256 , _UpperCAmelCase : Any=False , _UpperCAmelCase : Optional[int]=0.05 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Union[str, Any]=10 , _UpperCAmelCase : Optional[int]=0 , _UpperCAmelCase : List[str]=7 , **_UpperCAmelCase : Optional[Any] , ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Union[str, Any] = num_mel_bins
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Dict = encoder_layers
_lowerCAmelCase : int = encoder_attention_heads
_lowerCAmelCase : List[str] = decoder_layers
_lowerCAmelCase : Tuple = decoder_attention_heads
_lowerCAmelCase : str = decoder_ffn_dim
_lowerCAmelCase : Union[str, Any] = encoder_ffn_dim
_lowerCAmelCase : Any = dropout
_lowerCAmelCase : int = attention_dropout
_lowerCAmelCase : Dict = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : int = init_std
_lowerCAmelCase : List[str] = encoder_layerdrop
_lowerCAmelCase : Optional[Any] = decoder_layerdrop
_lowerCAmelCase : Dict = use_cache
_lowerCAmelCase : Union[str, Any] = encoder_layers
_lowerCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Any = max_source_positions
_lowerCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = classifier_proj_size
_lowerCAmelCase : str = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : str = apply_spec_augment
_lowerCAmelCase : Union[str, Any] = mask_time_prob
_lowerCAmelCase : Optional[int] = mask_time_length
_lowerCAmelCase : str = mask_time_min_masks
_lowerCAmelCase : List[str] = mask_feature_prob
_lowerCAmelCase : Union[str, Any] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
_lowerCAmelCase : Any = median_filter_width
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , suppress_tokens=_UpperCAmelCase , begin_suppress_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
class __snake_case (_a ):
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_lowerCAmelCase : Dict = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase : str = {0: """batch"""}
else:
_lowerCAmelCase : List[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction="""inputs""" )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 2_2050 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = OrderedDict()
_lowerCAmelCase : Optional[int] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_UpperCAmelCase , framework=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , time_duration=_UpperCAmelCase , frequency=_UpperCAmelCase , )
_lowerCAmelCase : Union[str, Any] = encoder_inputs["""input_features"""].shape[2]
_lowerCAmelCase : int = encoder_sequence_length // 2 if self.use_past else seq_length
_lowerCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase : Any = encoder_inputs.pop("""input_features""" )
_lowerCAmelCase : Optional[int] = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
_lowerCAmelCase : Union[str, Any] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> float:
'''simple docstring'''
return 1E-3
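# A minimal usage sketch (assuming the classes above are exposed as
# `WhisperConfig` / `WhisperOnnxConfig`; the overrides illustrate resizing the
# architecture, not any particular checkpoint):
#   config = WhisperConfig()
#   wide = WhisperConfig(d_model=768, encoder_layers=12, decoder_layers=12)
#   print(wide.hidden_size)  # attribute_map aliases hidden_size to d_model -> 768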
| 429
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""", class_=class_).find("""span""").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
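# Caveat: this scrapes a live page, so it needs network access, and Yahoo's
# CSS class names change over time; if soup.find() returns None, the class_
# selector above has gone stale. Hypothetical call: stock_price("MSFT") -> "328.39"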
| 429
| 1
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = torch.nn.Linear(10 , 10 )
snake_case_ = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ = Accelerator()
snake_case_ = accelerator.prepare(__lowercase )
try:
pickle.loads(pickle.dumps(__lowercase ) )
except Exception as e:
self.fail(f"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 139
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = FlaxAutoencoderKL
@property
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.uniform(__lowercase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
| 139
| 1
|
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Build an Eulerian traversal with depth-first search over unused edges."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Return (1, _) for an Euler circuit, (2, odd_node) for an Euler path, (3, _) otherwise."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
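# Sanity check on g1 above: vertex degrees are 1->3, 2->2, 3->2, 4->2, 5->1,
# so exactly two vertices have odd degree and check_euler reports an Euler
# path, running the DFS from one odd-degree vertex and ending at the other.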
| 491
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
lowerCAmelCase : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
lowerCAmelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase : bool = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __A ( self ):
A__ = self.task_name.lower()
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : int = """train"""
lowerCAmelCase : Tuple = """dev"""
lowerCAmelCase : Optional[Any] = """test"""
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : GlueDataTrainingArguments
lowerCAmelCase : str
lowerCAmelCase : List[InputFeatures]
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = Split.train , UpperCAmelCase__ = None , ):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , UpperCAmelCase__ , )
A__ = args
A__ = glue_processors[args.task_name]()
A__ = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
A__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
A__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
A__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A__ , A__ = label_list[2], label_list[1]
A__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + ".lock"
with FileLock(UpperCAmelCase__ ):
if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache:
A__ = time.time()
A__ = torch.load(UpperCAmelCase__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
A__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
A__ = self.processor.get_test_examples(args.data_dir )
else:
A__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
A__ = examples[:limit_length]
A__ = glue_convert_examples_to_features(
UpperCAmelCase__ , UpperCAmelCase__ , max_length=args.max_seq_length , label_list=UpperCAmelCase__ , output_mode=self.output_mode , )
A__ = time.time()
torch.save(self.features , UpperCAmelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCAmelCase__ ):
return self.features[i]
def __A ( self ):
return self.label_list
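# A minimal usage sketch of this (deprecated) wrapper, assuming the classes are
# exposed as `GlueDataTrainingArguments` / `GlueDataset`; the data_dir is a
# placeholder that must contain the task's .tsv files:
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC")
#   train_ds = GlueDataset(args, tokenizer=tokenizer, mode="train")
#   print(len(train_ds), train_ds[0])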
| 491
| 1
|
"""simple docstring"""
from typing import Any
def mode(input_list: list[Any]) -> list[Any]:
    """Return the mode(s) of input_list, sorted; [] for an empty list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
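# Examples:
#   mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]) -> [2]
#   mode([1, 1, 2, 2]) -> [1, 2]   # ties are all returned, sorted
#   mode([]) -> []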
| 706
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
SCREAMING_SNAKE_CASE_ = """Create a default config file for Accelerate with only a few flags set."""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__="no", SCREAMING_SNAKE_CASE__ = default_json_config_file, SCREAMING_SNAKE_CASE__ = False ) -> Tuple:
a_ : Union[str, Any] = Path(SCREAMING_SNAKE_CASE__ )
path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__, exist_ok=SCREAMING_SNAKE_CASE__ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
a_ : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
a_ : str = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
a_ : int = torch.cuda.device_count()
a_ : Optional[Any] = num_gpus
a_ : int = False
if num_gpus > 1:
a_ : Any = "MULTI_GPU"
else:
a_ : int = "NO"
elif is_xpu_available() and use_xpu:
a_ : int = torch.xpu.device_count()
a_ : str = num_xpus
a_ : Tuple = False
if num_xpus > 1:
a_ : int = "MULTI_XPU"
else:
a_ : List[str] = "NO"
elif is_npu_available():
a_ : List[Any] = torch.npu.device_count()
a_ : int = num_npus
a_ : List[Any] = False
if num_npus > 1:
a_ : str = "MULTI_NPU"
else:
a_ : Union[str, Any] = "NO"
else:
a_ : Optional[Any] = 0
a_ : Optional[Any] = True
a_ : Tuple = 1
a_ : Optional[int] = "NO"
a_ : str = ClusterConfig(**SCREAMING_SNAKE_CASE__ )
config.to_json_file(SCREAMING_SNAKE_CASE__ )
return path
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
a_ : Dict = parser.add_parser("default", parents=SCREAMING_SNAKE_CASE__, help=SCREAMING_SNAKE_CASE__, formatter_class=SCREAMING_SNAKE_CASE__ )
parser.add_argument(
"--config_file", default=SCREAMING_SNAKE_CASE__, help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
), dest="save_location", )
parser.add_argument(
"--mixed_precision", choices=["no", "fp16", "bf16"], type=SCREAMING_SNAKE_CASE__, help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", )
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> Tuple:
a_ : Any = write_basic_config(args.mixed_precision, args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 370
| 0
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
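# A short end-to-end sketch of the API exercised by the integration test above,
# using the public `facebook/convnext-tiny-224` checkpoint ("cat.png" is a
# placeholder path for any local image):
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])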
| 87
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCamelCase : Optional[Any] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
A__ = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
A__ = get_sagemaker_input()
else:
A__ = get_cluster_input()
return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 87
| 1
|
"""simple docstring"""
import re
def dna(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
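    # Example: each base maps to its complement (A<->T, C<->G); any character
    # outside ATCG raises ValueError.
    print(dna("GCTA"))  # CGAT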
| 701
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
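# Quick sanity check. Miller-Rabin is probabilistic, but with ~1000 random
# witnesses the chance of a composite slipping through is vanishingly small:
assert is_prime_big(97) and not is_prime_big(91)  # 91 = 7 * 13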
| 681
| 0
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """simple docstring"""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self):
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + " ".join(f'{it:>8}' for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + " ".join(f'{it:>8}' for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
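    # Example run with the tables defined above: prints each process as it
    # becomes executable and the updated available-resource vector, until the
    # system is declared safe or unsafe. The `describe=True` keyword (any truthy
    # keyword works, per `main`'s kwargs loop) also prints the resource tables.
    BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)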
| 72
| 0
|
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 113
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
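    # Each iteration replaces every segment with 4 shorter ones, so one side of
    # the triangle has 4**n segments (and 4**n + 1 points) after n steps:
    assert len(iterate([VECTOR_1, VECTOR_3], 5)) == 4**5 + 1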
| 113
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 531
|
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
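    # solution() scans denominators up to `digit` and returns the one whose unit
    # fraction has the longest recurring decimal cycle; the classic small case:
    assert solution(1, 10) == 7  # 1/7 = 0.(142857) has the longest cycle below 10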
| 531
| 1
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(f"""{add(first, second) = }""")
| 705
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f'''Save vocab file to {pytorch_vocab_dump_path}''')
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 529
| 0
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]):
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]):
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]):
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy to avoid aliasing: next_row must keep the previous row's values
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
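    # All four variants answer the same question (the side length of the largest
    # all-ones square) with different time/space trade-offs; a small cross-check:
    grid = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert largest_square_area_in_matrix_bottom_up(3, 3, grid) == 2
    assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, grid) == 2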
| 447
|
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 447
| 1
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
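    # Sanity check: two 4-ohm resistors give 2 ohms in parallel, 8 ohms in series.
    assert resistor_parallel([4, 4]) == 2.0
    assert resistor_series([4, 4]) == 8.0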
| 715
|
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 258
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
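# Composition sketch: the top-level config nests a text and a vision config,
# so custom sub-configs can be combined directly via the constructor (the
# specific sizes below are illustrative, not checkpoint values):
text_cfg = OwlViTTextConfig(hidden_size=256, num_hidden_layers=6)
vision_cfg = OwlViTVisionConfig(image_size=384)
cfg = OwlViTConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict(), projection_dim=256)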
| 226
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 226
| 1
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            })
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 710
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
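# Minimal usage sketch (values are illustrative, not defaults from any
# checkpoint): the derived `feature_size` is the per-step model input width,
# input_size * len(lags_sequence) plus the extra static/time features.
config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
print(config.feature_size)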
| 681
| 0
|
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
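    # Example: an outlay of 100 followed by two 60-unit inflows at a 10% discount
    # rate has NPV = -100 + 60/1.1 + 60/1.21, which rounds to 4.13. Note the
    # first cash flow (i = 0) is not discounted.
    print(net_present_value(0.10, [-100, 60, 60]))  # 4.13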
| 40
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 274
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            ))

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            ))

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
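# Same end-to-end check for the 2-1-h img2img checkpoint.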
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
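# With attention slicing and sequential CPU offload enabled, peak GPU memory
# for a two-step run must stay under 7 GB.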
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__lowercase = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
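# encode: the Whisper processor turns raw audio into log-mel input features.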
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
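# forward: the model autoregressively generates token ids from the features.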
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
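# decode: token ids are converted back to text, dropping special tokens.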
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
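# Usage sketch (assumption: PipelineTool.__call__ chains the three hooks above,
# encode -> forward -> decode, as in transformers' tool API):
#   tool = _A()                   # the transcriber tool defined above
#   transcript = tool(raw_audio)  # raw_audio: 1-D float array of audio samples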