Dataset schema:

| column | type | min | max |
|---|---|---|---|
| code | string (length) | 82 | 53.2k |
| code_codestyle | int64 | 0 | 721 |
| style_context | string (length) | 91 | 41.9k |
| style_context_codestyle | int64 | 0 | 699 |
| label | int64 | 0 | 1 |

Each row pairs a `code` cell with a `style_context` cell, together with two style-code integers and a binary `label`.
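A minimal sketch of loading a dump with this schema for inspection; the dataset path below is hypothetical and stands in for the real repository:

    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical name
    row = ds[0]
    print(row["label"], row["code_codestyle"], len(row["code"]), len(row["style_context"]))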
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
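# Illustrative sketch (not part of the test suite): how BatchSamplerShard splits
# a sampler across two processes, wrapping around so both shards stay even.
if __name__ == "__main__":
    demo_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
    demo_shards = [BatchSamplerShard(demo_sampler, 2, i) for i in range(2)]
    for i, shard in enumerate(demo_shards):
        print(f"process {i}:", list(shard))
    # process 0: [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
    # process 1: [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]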
*(code_codestyle: 56; the paired style_context cell follows)*
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_, self.block_out_channels_ * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
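# For reference, a hedged sketch of the two-stage prior + decoder flow the slow
# test exercises (requires a CUDA GPU and downloads the pretrained weights).
def _demo_img2img():  # illustrative helper, not part of the test suite
    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
    )
    image_emb, zero_image_emb = prior("A red cartoon frog, 4k").to_tuple()
    image = decoder(
        image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, strength=0.2
    ).images[0]
    image.save("frog.png")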
*(style_context_codestyle: 130, label: 0)*

---
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
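    # Quick sanity check of the kernel (illustrative): the Gaussian peaks at the
    # center entry, and since gen_gaussian_kernel skips normalization its sum is
    # below 1 (~0.78 for k_size=3, sigma=1), so the filtered image comes out
    # slightly darker than the input; divide by the sum to normalize.
    demo_kernel = gen_gaussian_kernel(3, sigma=1)
    print(demo_kernel.round(4))
    print(demo_kernel.sum())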
*(code_codestyle: 715; the paired style_context cell follows)*
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
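    # Convergence check (illustrative): because the summand uses abs(), this
    # computes the unsigned area between curve and x-axis. For x^3 + x^2 on
    # [-5, 5] the exact value is 344/3 + 198 = 938/3, roughly 312.67, so the
    # printed estimates above should approach it as the step count grows.
    print(f"exact unsigned area: {938 / 3}")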
*(style_context_codestyle: 83, label: 0)*

---
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
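# Hedged sketch of the generation path the integration test checks (downloads
# the 400M distilled checkpoint; model and tokenizer names are from the test).
def _demo_generate():  # illustrative helper, not part of the test suite
    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
    inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
    generated = model.generate(inputs.input_ids)
    print(tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True)[0])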
*(code_codestyle: 611; the paired style_context cell follows)*
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
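# Illustration of the renaming on two hypothetical original-style keys:
#   backbone.patch_embed1.proj.weight -> segformer.encoder.patch_embeddings.0.proj.weight
#   decode_head.linear_c4.proj.weight -> decode_head.linear_c.3.proj.weight
# e.g. print(list(rename_keys({"backbone.patch_embed1.proj.weight": None}).keys()))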
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
UpperCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
*(style_context_codestyle: 611, label: 1)*

---
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
*(code_codestyle: 307; the paired style_context cell follows)*
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
*(style_context_codestyle: 307, label: 1)*

---
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in the line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function so that comparisons are lower case and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
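# Illustration: constants first, then classes, then functions, each group
# sorted case-insensitively with underscores ignored, e.g.
#   sort_objects(["tensor_utils", "PretrainedModel", "DEFAULT_SEED", "apply"])
#   -> ["DEFAULT_SEED", "PretrainedModel", "apply", "tensor_utils"]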
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
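
# For illustration, a single-line `_import_structure` entry would be rewritten
# roughly like this (hypothetical module and object names):
#
#     >>> sort_objects_in_import('    "my_module": ["ZConfig", "a_function", "A_CONSTANT"],')
#     '    "my_module": ["A_CONSTANT", "ZConfig", "a_function"],'
#
# Only the bracket content is re-emitted (constants, then classes, then
# functions); everything outside the brackets is left untouched.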
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    # PATH_TO_TRANSFORMERS is expected to be defined near the top of this script
    # (the root of the package whose __init__.py files should be sorted).
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
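
# Typical invocations, assuming this file lives in a repo's utils/ folder (the
# path is illustrative, not fixed by the script itself):
#
#     python utils/custom_init_isort.py --check_only   # fail if any init needs sorting
#     python utils/custom_init_isort.py                # rewrite the inits in place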
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the distinct prime factors of ``n`` by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that ALL elements of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of ``n`` consecutive integers with ``n`` distinct
    prime factors each (Project Euler problem 47)."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group of ``n`` consecutive
    integers with ``n`` distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
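
# A quick sanity check of the helpers above: 644, 645 and 646 are the first
# three consecutive integers with three distinct prime factors each
# (644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19), so:
#
#     >>> unique_prime_factors(644)
#     {2, 7, 23}
#     >>> run(3)
#     [644, 645, 646]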
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""รฉ""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
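
# Migration sketch: code that previously instantiated the deprecated class can
# switch to the image processor with no other changes (the checkpoint name
# below is illustrative):
#
#     >>> from transformers import DeiTImageProcessor
#     >>> image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#
# The class above only emits a FutureWarning and defers everything to
# DeiTImageProcessor.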
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
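
# A minimal sketch of what the lazy module buys us, assuming this file is the
# package's __init__.py (import paths below are illustrative):
#
#     >>> from transformers.models import mra   # cheap: no torch-backed code runs yet
#     >>> mra.MraConfig                          # resolved from configuration_mra
#     >>> mra.MraModel                           # first access triggers the modeling_mra import
#
# When torch is unavailable, "modeling_mra" is never added to
# _import_structure, so the attribute lookup fails instead of the import itself.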
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
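
# A short usage sketch (the checkpoint name is the one referenced in the maps
# above; the output pieces are illustrative and depend on the actual vocab):
#
#     >>> tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#     >>> tokenizer.tokenize("J'aime le camembert !")
#     ['▁J', "'", 'aime', '▁le', '▁camembert', '▁!']
#
# Ids below self.fairseq_offset map to the fairseq special tokens rather than
# to sentencepiece pieces, which is exactly what _convert_token_to_id and
# _convert_id_to_token implement.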
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
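
# Typical invocation (assuming this script is saved as extract.py; the dump
# path is just an example):
#
#     python extract.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/roberta_6L.pth --vocab_transform
#
# The resulting state dict contains only the embeddings, the six selected
# teacher layers (0, 2, 4, 7, 9, 11) renumbered 0-5, and the LM head.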
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed v_rms = sqrt(3RT / M), with the temperature in
    kelvin and the molar mass in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol (28 g/mol for N2; the formula expects kg/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_small_model_pt_cpu(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
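
# In normal use this runs through the accelerate CLI rather than directly,
# e.g. (the config path is illustrative):
#
#     accelerate test
#     accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# which launches test_script.py under accelerate-launch with the chosen config.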
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 0.99_85
_SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50"""
_SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=UpperCAmelCase_ )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """Narsil/layoutlmv3-finetuned-funsd"""
_SCREAMING_SNAKE_CASE = 0.99_93
_SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=UpperCAmelCase_ , threshold=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
| 569
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> List[str]:
"""simple docstring"""
with open(snake_case__ ) as metadata_file:
_SCREAMING_SNAKE_CASE = json.load(snake_case__ )
_SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=snake_case__ ,**metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_SCREAMING_SNAKE_CASE = torch.load(snake_case__ ,map_location="""cpu""" )
# Load the entity vocab file
_SCREAMING_SNAKE_CASE = load_entity_vocab(snake_case__ )
_SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_SCREAMING_SNAKE_CASE = AddedToken("""<ent>""" ,lstrip=snake_case__ ,rstrip=snake_case__ )
_SCREAMING_SNAKE_CASE = AddedToken("""<ent2>""" ,lstrip=snake_case__ ,rstrip=snake_case__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ ,LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
json.dump(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(snake_case__ )
# Initialize the embeddings of the special tokens
_SCREAMING_SNAKE_CASE = state_dict["""embeddings.word_embeddings.weight"""]
_SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
_SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
_SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_SCREAMING_SNAKE_CASE = F'encoder.layer.{layer_index}.attention.self.'
_SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
_SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
_SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_SCREAMING_SNAKE_CASE = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["""[MASK]"""]]
_SCREAMING_SNAKE_CASE = LukeModel(config=snake_case__ ).eval()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(snake_case__ ,strict=snake_case__ )
if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(snake_case__ ,task="""entity_classification""" )
_SCREAMING_SNAKE_CASE = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
_SCREAMING_SNAKE_CASE = (39, 42)
_SCREAMING_SNAKE_CASE = tokenizer(snake_case__ ,entity_spans=[span] ,add_prefix_space=snake_case__ ,return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = model(**snake_case__ )
# Verify word hidden states
if model_size == "large":
_SCREAMING_SNAKE_CASE = torch.Size((1, 42, 10_24) )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
_SCREAMING_SNAKE_CASE = torch.Size((1, 42, 7_68) )
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,snake_case__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_SCREAMING_SNAKE_CASE = torch.Size((1, 1, 10_24) )
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
_SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) )
_SCREAMING_SNAKE_CASE = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,snake_case__ ,atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case__ ) )
model.save_pretrained(snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
with open(snake_case__ ,"""r""" ,encoding="""utf-8""" ) as f:
for index, line in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.rstrip().split("""\t""" )
_SCREAMING_SNAKE_CASE = index
return entity_vocab
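# Illustrative sketch of the TSV layout the loader above expects: one entity per
# line as "title<TAB>count"; only the title is kept, and the line index becomes
# the entity id, e.g.
#
#   [PAD]\t0
#   [UNK]\t0
#   [MASK]\t0
#
# so entity_vocab["[MASK]"] maps to that entity's line number.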
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
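# Example invocation (illustrative; the script and file names are placeholders,
# not shipped files):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path converted_luke_base \
#       --model_size base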
| 569
| 1
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
def get_masked_lm_array(_lowercase ):
__UpperCamelCase = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__UpperCamelCase = tf.train.load_variable(_lowercase , _lowercase )
if "kernel" in name:
__UpperCamelCase = array.transpose()
return torch.from_numpy(_lowercase )
def get_encoder_array(_lowercase ):
__UpperCamelCase = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__UpperCamelCase = tf.train.load_variable(_lowercase , _lowercase )
if "kernel" in name:
__UpperCamelCase = array.transpose()
return torch.from_numpy(_lowercase )
def get_encoder_layer_array(_lowercase , _lowercase ):
__UpperCamelCase = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__UpperCamelCase = tf.train.load_variable(_lowercase , _lowercase )
if "kernel" in name:
__UpperCamelCase = array.transpose()
return torch.from_numpy(_lowercase )
def get_encoder_attention_layer_array(_lowercase , _lowercase , _lowercase ):
__UpperCamelCase = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__UpperCamelCase = tf.train.load_variable(_lowercase , _lowercase )
__UpperCamelCase = array.reshape(_lowercase )
if "kernel" in name:
__UpperCamelCase = array.transpose()
return torch.from_numpy(_lowercase )
print(f'''Loading model based on config from {config_path}...''' )
__UpperCamelCase = BertConfig.from_json_file(_lowercase )
__UpperCamelCase = BertForMaskedLM(_lowercase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__UpperCamelCase = model.bert.encoder.layer[layer_index]
# Self-attention
__UpperCamelCase = layer.attention.self
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_query_dense/kernel' , self_attn.query.weight.data.shape )
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_query_dense/bias' , self_attn.query.bias.data.shape )
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_key_dense/kernel' , self_attn.key.weight.data.shape )
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_key_dense/bias' , self_attn.key.bias.data.shape )
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_value_dense/kernel' , self_attn.value.weight.data.shape )
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
__UpperCamelCase = layer.attention.output
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_output_dense/kernel' , self_output.dense.weight.data.shape )
__UpperCamelCase = get_encoder_attention_layer_array(
_lowercase , '_output_dense/bias' , self_output.dense.bias.data.shape )
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_attention_layer_norm/gamma' )
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_attention_layer_norm/beta' )
# Intermediate
__UpperCamelCase = layer.intermediate
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_intermediate_dense/kernel' )
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_intermediate_dense/bias' )
# Output
__UpperCamelCase = layer.output
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_output_dense/kernel' )
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_output_dense/bias' )
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_output_layer_norm/gamma' )
__UpperCamelCase = get_encoder_layer_array(_lowercase , '_output_layer_norm/beta' )
# Embeddings
__UpperCamelCase = get_encoder_array('_position_embedding_layer/embeddings' )
__UpperCamelCase = get_encoder_array('_type_embedding_layer/embeddings' )
__UpperCamelCase = get_encoder_array('_embedding_norm_layer/gamma' )
__UpperCamelCase = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
__UpperCamelCase = model.cls.predictions.transform
__UpperCamelCase = get_masked_lm_array('dense/kernel' )
__UpperCamelCase = get_masked_lm_array('dense/bias' )
__UpperCamelCase = get_masked_lm_array('layer_norm/gamma' )
__UpperCamelCase = get_masked_lm_array('layer_norm/beta' )
__UpperCamelCase = get_masked_lm_array('embedding_table' )
# Pooling
__UpperCamelCase = BertPooler(config=_lowercase )
__UpperCamelCase = get_encoder_array('_pooler_layer/kernel' )
__UpperCamelCase = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(_lowercase )
# Integration test - should load without any errors ;)
__UpperCamelCase = BertForMaskedLM.from_pretrained(_lowercase )
print(new_model.eval() )
print('Model conversion was done successfully!' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
__snake_case = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
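# Example invocation (illustrative; the script and file names are placeholders):
#
#   python convert_token_dropping_bert_checkpoint.py \
#       --tf_checkpoint_path path/to/tf_checkpoint \
#       --bert_config_file path/to/bert_config.json \
#       --pytorch_dump_path converted_bert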
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _A ( _lowercase ) -> str:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = tmp_path_factory.getbasetemp() / 'cache'
__UpperCamelCase = test_hf_cache_home / 'datasets'
__UpperCamelCase = test_hf_cache_home / 'metrics'
__UpperCamelCase = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_lowercase ) )
@pytest.fixture(autouse=_lowercase , scope='session' )
def _A ( ) -> Dict:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _lowercase )
@pytest.fixture
def _A ( _lowercase ) -> Any:
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _lowercase )
| 1
| 1
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
a__ = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def _UpperCAmelCase ( a : Optional[Any] ):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
a__ = parser.parse_args()
if args.check_lib:
a__ = importlib.import_module("""transformers""")
a__ = Path(transformers_module.__file__).parent
else:
a__ = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 719
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Union[tf.Tensor, np.ndarray] ):
if isinstance(a , np.ndarray ):
return list(tensor.shape )
snake_case__ = tf.shape(a )
if tensor.shape == tf.TensorShape(a ):
return dynamic
snake_case__ = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(a )]
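# Illustrative behaviour of the helper above: for a tensor of static shape
# (None, 128), it returns [<runtime tf.shape value>, 128] -- dimensions known at
# graph-construction time stay Python ints, unknown ones fall back to the
# dynamic shape tensor.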
def _UpperCAmelCase ( a : tf.Tensor , a : Optional[int] = None , a : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1e-9 , axis=a , name=a )
def _UpperCAmelCase ( a : Optional[int] , a : Union[str, Any] , a : Dict , a : int=1e-5 , a : Dict=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(a , a ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
snake_case__ , snake_case__ = tf.nn.moments(a , axes=[axis] , keepdims=a )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
snake_case__ = [1] * inputs.shape.rank
snake_case__ = shape_list(a )[axis]
snake_case__ = tf.reshape(a , a )
snake_case__ = tf.reshape(a , a )
# Compute layer normalization using the batch_normalization
# function.
snake_case__ = tf.nn.batch_normalization(
a , a , a , offset=a , scale=a , variance_epsilon=a , )
return outputs
def _UpperCAmelCase ( a : Optional[int] , a : Dict=0 , a : List[Any]=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
snake_case__ = tf.shape(a )
snake_case__ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
snake_case__ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(a , a )
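# Illustrative example: flattening dims 1..2 of a (2, 3, 4, 5) tensor with the
# helper above yields shape (2, 12, 5), matching torch.flatten(x, start_dim=1,
# end_dim=2).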
def _UpperCAmelCase ( a : tf.Tensor ):
if not isinstance(a , tf.Tensor ):
snake_case__ = tf.convert_to_tensor(a ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
snake_case__ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
snake_case__ = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
snake_case__ = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
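# Illustrative example: a (batch, seq_len) encoder mask is first broadcast to
# (batch, 1, 1, seq_len), then inverted so attended positions become 0 and
# masked positions become dtype.min, i.e. a large negative additive bias.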
def _UpperCAmelCase ( a : tf.Tensor , a : int , a : str = "input_ids" ):
tf.debugging.assert_less(
a , tf.cast(a , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(a )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def _UpperCAmelCase ( a : str , a : Tuple , a : Optional[int] ):
snake_case__ = 6_4512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
snake_case__ = [x for x in data if len(a ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
snake_case__ = np.asarray(a )
snake_case__ = 1
snake_case__ = np.array_split(a , a )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
snake_case__ = np.array_split(a , a )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(a ):
snake_case__ = chunk_data
else:
snake_case__ = data
def _UpperCAmelCase ( a : List[Any] , a : Optional[int] ):
if name in group.attrs:
snake_case__ = [n.decode("""utf8""" ) if hasattr(a , """decode""" ) else n for n in group.attrs[name]]
else:
snake_case__ = []
snake_case__ = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(a , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
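# Illustrative example of the chunking convention handled above: an attribute
# too large for a single HDF5 object header is stored as "layer_names0",
# "layer_names1", ..., and this loader concatenates the decoded chunks in order.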
def _UpperCAmelCase ( a : Optional[int] ):
def _expand_single_ad_tensor(t : Any ):
if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(t , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , a )
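# Illustrative example: applied to {"labels": tf.constant([1, 2, 3])}, the helper
# above returns {"labels": <tf.Tensor of shape (3, 1)>}; tensors of other ranks
# and non-tensor leaves pass through unchanged.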
| 99
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
lowerCamelCase_ = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase_ = {f'funnel-transformer/{name}': 512 for name in _model_names}
lowerCamelCase_ = {f'funnel-transformer/{name}': {'''do_lower_case''': True} for name in _model_names}
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_INIT_CONFIGURATION
__magic_name__ = FunnelTokenizer
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = 2
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Any="<unk>" , lowerCAmelCase_ : Tuple="<sep>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Optional[Any]="<cls>" , lowerCAmelCase_ : Tuple="<mask>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : Dict="</s>" , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Union[str, Any]="##" , **lowerCAmelCase_ : str , ) -> int:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , clean_text=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , wordpieces_prefix=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase_ ) != tokenize_chinese_chars
):
UpperCAmelCase_ : str = getattr(lowerCAmelCase_ , normalizer_state.pop("type" ) )
UpperCAmelCase_ : List[Any] = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : Dict = tokenize_chinese_chars
UpperCAmelCase_ : Optional[Any] = normalizer_class(**lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=None ) -> Optional[Any]:
UpperCAmelCase_ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Tuple = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase_ : Union[str, Any] = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
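# Illustrative example of the token-type ids produced above: with
# cls_token_type_id = 2, a single 3-token sequence yields [2, 0, 0, 0, 0] --
# the leading [CLS] gets type 2 while the tokens and trailing [SEP] get type 0.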
| 95
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ (enum.Enum ):
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
@add_end_docstrings(__A )
class UpperCamelCase_ (__A ):
__magic_name__ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self : List[Any] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[Any] ) -> Optional[int]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : Any = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[int] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
UpperCAmelCase_ : List[Any] = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : Optional[int] = {**self._forward_params, **forward_params}
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Optional[Any] , ) -> int:
UpperCAmelCase_ : Union[str, Any] = {}
if prefix is not None:
UpperCAmelCase_ : Tuple = prefix
if prefix:
UpperCAmelCase_ : Optional[Any] = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
UpperCAmelCase_ : List[str] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
" [None, 'hole']" )
UpperCAmelCase_ : Dict = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = generate_kwargs
UpperCAmelCase_ : Dict = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : Tuple = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : int = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : int = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
UpperCAmelCase_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self : List[Any] , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]="" , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Optional[Any] ) -> Dict:
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
UpperCAmelCase_ : Any = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ : Optional[Any] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : Dict = generate_kwargs["max_new_tokens"]
else:
UpperCAmelCase_ : List[str] = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Tuple = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
UpperCAmelCase_ : Dict = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Union[str, Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : str ) -> Dict:
UpperCAmelCase_ : Optional[Any] = model_inputs["input_ids"]
UpperCAmelCase_ : str = model_inputs.get("attention_mask" , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = 1
else:
UpperCAmelCase_ : Union[str, Any] = input_ids.shape[0]
UpperCAmelCase_ : Any = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase_ : Any = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
UpperCAmelCase_ : str = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase_ : Tuple = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase_ : Optional[int] = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase_ : int = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Dict = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase_ : Optional[int] = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ : List[Any] = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]=ReturnType.FULL_TEXT , lowerCAmelCase_ : Dict=True ) -> List[str]:
UpperCAmelCase_ : List[Any] = model_outputs["generated_sequence"][0]
UpperCAmelCase_ : int = model_outputs["input_ids"]
UpperCAmelCase_ : List[str] = model_outputs["prompt_text"]
UpperCAmelCase_ : Union[str, Any] = generated_sequence.numpy().tolist()
UpperCAmelCase_ : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ : Optional[Any] = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ : str = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase_ : List[Any] = 0
else:
UpperCAmelCase_ : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ : Union[str, Any] = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ : Dict = text[prompt_length:]
UpperCAmelCase_ : int = {"generated_text": all_text}
records.append(lowerCAmelCase_ )
return records
| 95
| 1
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def SCREAMING_SNAKE_CASE ( lowerCamelCase_ = "https://www.worldometers.info/coronavirus"):
a__ = BeautifulSoup(requests.get(lowerCamelCase_).text , '''html.parser''')
a__ = soup.findAll('''h1''')
a__ = soup.findAll('''div''' , {'''class''': '''maincounter-number'''})
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''})
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''})
return {key.text.strip(): value.text.strip() for key, value in zip(lowerCamelCase_ , lowerCamelCase_)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
| 200
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : Any = logging.get_logger(__name__)
__a : Union[str, Any] = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='swin2sr'
_SCREAMING_SNAKE_CASE ={
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Union[str, Any] , __A: List[Any]=64 , __A: int=1 , __A: Dict=3 , __A: List[Any]=180 , __A: int=[6, 6, 6, 6, 6, 6] , __A: Tuple=[6, 6, 6, 6, 6, 6] , __A: int=8 , __A: Optional[int]=2.0 , __A: Optional[int]=True , __A: int=0.0 , __A: Any=0.0 , __A: Optional[Any]=0.1 , __A: Optional[Any]="gelu" , __A: Dict=False , __A: List[Any]=0.0_2 , __A: List[Any]=1e-5 , __A: List[str]=2 , __A: int=1.0 , __A: Dict="1conv" , __A: Optional[Any]="pixelshuffle" , **__A: Dict , ):
'''simple docstring'''
super().__init__(**__A )
a__ = image_size
a__ = patch_size
a__ = num_channels
a__ = embed_dim
a__ = depths
a__ = len(__A )
a__ = num_heads
a__ = window_size
a__ = mlp_ratio
a__ = qkv_bias
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = drop_path_rate
a__ = hidden_act
a__ = use_absolute_embeddings
a__ = layer_norm_eps
a__ = initializer_range
a__ = upscale
a__ = img_range
a__ = resi_connection
a__ = upsampler
| 200
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class a_ :
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple=9_9 , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Optional[Any]=3_7 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_1_2 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : List[Any]=1_0_0_0 , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
__snake_case = range_bbox
def lowercase__ ( self : str ):
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
__snake_case = TFLayoutLMModel(config=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ):
__snake_case = TFLayoutLMForMaskedLM(config=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
__snake_case = self.num_labels
__snake_case = TFLayoutLMForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ):
__snake_case = self.num_labels
__snake_case = TFLayoutLMForTokenClassification(config=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
__snake_case = TFLayoutLMForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[str] ):
config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = self.prepare_config_and_inputs()
inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
lowercase_ : Tuple = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowercase_ : Tuple = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase_ : Optional[Any] = False
lowercase_ : Dict = True
lowercase_ : Dict = 10
def lowercase__ ( self : List[str] ):
__snake_case = TFLayoutLMModelTester(self )
__snake_case = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def lowercase__ ( self : Any ):
self.config_tester.run_common_tests()
def lowercase__ ( self : str ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self : int ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Optional[Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Any ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Dict ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
@slow
def lowercase__ ( self : Tuple ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFLayoutLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip('Onnx compliance broke with TF 2.10' )
def lowercase__ ( self : Dict ):
pass
def lowerCamelCase__ ( ):
# fmt: off
__snake_case = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
__snake_case = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__snake_case = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
__snake_case = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__snake_case = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a_ ( unittest.TestCase ):
@slow
def lowercase__ ( self : Dict ):
__snake_case = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case = model(input_ids=__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
# test the sequence output on [0, :3, :3]
__snake_case = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# test the pooled output on [1, :3]
__snake_case = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
@slow
def lowercase__ ( self : Any ):
# initialize model with randomly initialized sequence classification head
__snake_case = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case = model(
input_ids=__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__snake_case = outputs.loss
__snake_case = (2,)
self.assertEqual(loss.shape , __SCREAMING_SNAKE_CASE )
# test the shape of the logits
__snake_case = outputs.logits
__snake_case = (2, 2)
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
@slow
def lowercase__ ( self : Tuple ):
# initialize model with randomly initialized token classification head
__snake_case = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case = model(
input_ids=__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
# test the shape of the logits
__snake_case = outputs.logits
__snake_case = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
@slow
def lowercase__ ( self : Any ):
# initialize model with randomly initialized question answering head
__snake_case = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case = model(input_ids=__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
# test the shape of the logits
__snake_case = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , __SCREAMING_SNAKE_CASE )
self.assertEqual(outputs.end_logits.shape , __SCREAMING_SNAKE_CASE )
| 356
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = """https://openaipublic.azureedge.net/jukebox/models/"""
SCREAMING_SNAKE_CASE : int = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def __A ( _A ):
"""simple docstring"""
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
__a = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
__a = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
__a = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
__a = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
__a = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
__a = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__a = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
__a = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def __A ( _A , _A , _A , _A ):
"""simple docstring"""
__a = {}
import re
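    # Regexes matching encoder/decoder and conditioner-block parameter names in the
    # original checkpoint; the captured groups (level, block, and layer indices) are
    # used below to compute the corresponding Hugging Face module names.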
__a = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
__a = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
__a = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
__a = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
__a = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
__a = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
__a = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
__a = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
__a = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_A ):
__a = re_encoder_block_conv_in.match(_A )
__a = regex_match.groups()
__a = int(groups[2] ) * 2 + int(groups[3] )
__a = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__a = re_encoder_block_conv_in.sub(_A , _A )
elif re_encoder_block_resnet.fullmatch(_A ):
__a = re_encoder_block_resnet.match(_A )
__a = regex_match.groups()
__a = int(groups[2] ) * 2 + int(groups[3] )
__a = {"1": 1, "3": 2}[groups[-2]]
__a = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__a = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__a = prefix + resnet_block
__a = re_encoder_block_resnet.sub(_A , _A )
elif re_encoder_block_proj_out.fullmatch(_A ):
__a = re_encoder_block_proj_out.match(_A )
__a = regex_match.groups()
__a = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__a = re_encoder_block_proj_out.sub(_A , _A )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_A ):
__a = re_decoder_block_conv_out.match(_A )
__a = regex_match.groups()
__a = int(groups[2] ) * 2 + int(groups[3] ) - 2
__a = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__a = re_decoder_block_conv_out.sub(_A , _A )
elif re_decoder_block_resnet.fullmatch(_A ):
__a = re_decoder_block_resnet.match(_A )
__a = regex_match.groups()
__a = int(groups[2] ) * 2 + int(groups[3] ) - 2
__a = {"1": 1, "3": 2}[groups[-2]]
__a = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__a = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__a = prefix + resnet_block
__a = re_decoder_block_resnet.sub(_A , _A )
elif re_decoder_block_proj_in.fullmatch(_A ):
__a = re_decoder_block_proj_in.match(_A )
__a = regex_match.groups()
__a = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__a = re_decoder_block_proj_in.sub(_A , _A )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_A ):
__a = re_prior_cond_conv_out.match(_A )
__a = regex_match.groups()
__a = int(groups[1] ) * 2 + int(groups[2] ) - 2
__a = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__a = re_prior_cond_conv_out.sub(_A , _A )
elif re_prior_cond_resnet.fullmatch(_A ):
__a = re_prior_cond_resnet.match(_A )
__a = regex_match.groups()
__a = int(groups[1] ) * 2 + int(groups[2] ) - 2
__a = {"1": 1, "3": 2}[groups[-2]]
__a = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__a = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__a = prefix + resnet_block
__a = re_prior_cond_resnet.sub(_A , _A )
elif re_prior_cond_proj_in.fullmatch(_A ):
__a = re_prior_cond_proj_in.match(_A )
__a = regex_match.groups()
__a = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__a = re_prior_cond_proj_in.sub(_A , _A )
# keep original key
else:
__a = original_key
__a = replace_key(_A )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            __a = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
__a = original_key
__a = original_key
__a = value
return new_dict
@torch.no_grad()
def __A ( _A=None , _A=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
__a = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_A )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_A )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , "wb" ).write(r.content )
__a = MODEL_MAPPING[model_name.split("/" )[-1]]
__a = JukeboxConfig.from_pretrained(_A )
__a = JukeboxModel(_A )
__a = []
__a = {}
for i, dict_name in enumerate(_A ):
__a = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["model"]
__a = {}
for k in old_dic.keys():
if k.endswith(".b" ):
__a = old_dic[k]
elif k.endswith(".w" ):
__a = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__a = old_dic[k]
else:
__a = old_dic[k]
__a = "vqvae" if i == 0 else f"""priors.{3 - i}"""
__a = fix_jukebox_keys(_A , model.state_dict() , _A , _A )
weight_dict.append(_A )
__a = weight_dict.pop(0 )
model.vqvae.load_state_dict(_A )
for i in range(len(_A ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_A ).mkdir(exist_ok=_A )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
json.dump(_A , _A )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_A )
return weight_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
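# Example invocation (script and folder names are illustrative):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path ./jukebox-1b-converted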
| 197
| 0
|
'''simple docstring'''
def lowercase__( __UpperCamelCase: list[list[int]] ,__UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: set ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = len(__UpperCamelCase ), len(grid[0] )
if (
min(__UpperCamelCase ,__UpperCamelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
SCREAMING_SNAKE_CASE : Dict = 0
count += depth_first_search(__UpperCamelCase ,row + 1 ,__UpperCamelCase ,__UpperCamelCase )
count += depth_first_search(__UpperCamelCase ,row - 1 ,__UpperCamelCase ,__UpperCamelCase )
count += depth_first_search(__UpperCamelCase ,__UpperCamelCase ,col + 1 ,__UpperCamelCase )
count += depth_first_search(__UpperCamelCase ,__UpperCamelCase ,col - 1 ,__UpperCamelCase )
visit.remove((row, col) )
return count
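# Example: on an open 2x2 grid there are exactly two distinct non-revisiting
# paths from the top-left to the bottom-right cell, so
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set()) returns 2.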
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
'''simple docstring'''
def lowercase__( __UpperCamelCase: list[list[int]] ,__UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: set ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = len(__UpperCamelCase ), len(grid[0] )
if (
min(__UpperCamelCase ,__UpperCamelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
SCREAMING_SNAKE_CASE : Dict = 0
count += depth_first_search(__UpperCamelCase ,row + 1 ,__UpperCamelCase ,__UpperCamelCase )
count += depth_first_search(__UpperCamelCase ,row - 1 ,__UpperCamelCase ,__UpperCamelCase )
count += depth_first_search(__UpperCamelCase ,__UpperCamelCase ,col + 1 ,__UpperCamelCase )
count += depth_first_search(__UpperCamelCase ,__UpperCamelCase ,col - 1 ,__UpperCamelCase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 508
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
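# Configuration for BertGeneration encoder/decoder checkpoints; the defaults
# below (24 layers, 1024 hidden units, 16 attention heads, 4096 intermediate
# size) correspond to a bert-large-sized transformer.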
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 'bert-generation'
def __init__( self : List[Any] , lowerCAmelCase : Tuple=5_0358 , lowerCAmelCase : List[Any]=1024 , lowerCAmelCase : Any=24 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Dict=4096 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[Any]=512 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : List[Any]=1e-12 , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Any=1 , lowerCAmelCase : Optional[int]="absolute" , lowerCAmelCase : Any=True , **lowerCAmelCase : List[str] , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
| 169
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
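# These tests cover BertJapaneseTokenizer with its word-level tokenizers
# (MeCab, Sudachi, Juman++) and both subword strategies (WordPiece and
# character-level), including pickling round-trips of the tokenizers.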
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ):
_a = BertJapaneseTokenizer
_a = False
_a = True
def __lowercase ( self : Any ):
super().setUp()
lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""ใใใซใกใฏ""",
"""ใใ""",
"""ใซใกใฏ""",
"""ใฐใใฏ""",
"""##ใใ""",
"""##ใซใกใฏ""",
"""##ใฐใใฏ""",
"""ไธ็""",
"""##ไธ็""",
"""ใ""",
"""##ใ""",
"""ใ""",
"""##ใ""",
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __lowercase ( self : int , lowerCAmelCase : List[Any] ):
lowerCAmelCase = """ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ"""
lowerCAmelCase = """ใใใซใกใฏ ใ ไธ็ ใ ใใใฐใใฏ ใ ไธ็ ใ"""
return input_text, output_text
def __lowercase ( self : Optional[Any] , lowerCAmelCase : List[Any] ):
lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(lowerCAmelCase )
lowerCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
lowerCAmelCase = tokenizer.decode(lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
return text, ids
def __lowercase ( self : List[str] ):
pass # TODO add if relevant
def __lowercase ( self : Optional[Any] ):
pass # TODO add if relevant
def __lowercase ( self : Any ):
pass # TODO add if relevant
def __lowercase ( self : List[Any] ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize("""ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ""" )
self.assertListEqual(lowerCAmelCase , ["""ใใใซใกใฏ""", """ใ""", """ไธ็""", """ใ""", """ใใ""", """##ใฐใใฏ""", """ใ""", """ไธ็""", """ใ"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __lowercase ( self : int ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(lowerCAmelCase )
lowerCAmelCase = """ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ"""
lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["""ใใใซใกใฏ""", """ใ""", """ไธ็""", """ใ""", """ใใ""", """##ใฐใใฏ""", """ใ""", """ไธ็""", """ใ"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase , """wb""" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , """rb""" ) as handle:
lowerCAmelCase = pickle.load(lowerCAmelCase )
lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : str ):
lowerCAmelCase = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซในใใข""", """ใง""", """iPhone""", """8""", """ใ""", """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ใ"""] , )
def __lowercase ( self : int ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซ""", """ในใใข""", """ใง""", """iPhone""", """8""", """ใ""", """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ใ"""] , )
def __lowercase ( self : Optional[int] ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซ""", """ในใใข""", """ใง""", """iPhone""", """8""", """ใ""", """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ใ"""] , )
def __lowercase ( self : Any ):
lowerCAmelCase = MecabTokenizer(do_lower_case=lowerCAmelCase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซในใใข""", """ใง""", """iphone""", """8""", """ใ""", """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ใ"""] , )
def __lowercase ( self : Optional[int] ):
try:
lowerCAmelCase = MecabTokenizer(
do_lower_case=lowerCAmelCase , normalize_text=lowerCAmelCase , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใข""", """ใง""", """iPhone""", """๏ผ""", """ใ""", """็บๅฃฒ""", """ใ""", """ใใ""", """\u3000""", """ใ"""] , )
def __lowercase ( self : int ):
lowerCAmelCase = MecabTokenizer(normalize_text=lowerCAmelCase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใข""", """ใง""", """iPhone""", """๏ผ""", """ใ""", """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ใ""", """ใ"""] , )
@require_sudachi
def __lowercase ( self : List[Any] ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(lowerCAmelCase )
lowerCAmelCase = """ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ"""
lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["""ใใใซใกใฏ""", """ใ""", """ไธ็""", """ใ""", """ใใ""", """##ใฐใใฏ""", """ใ""", """ไธ็""", """ใ"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase , """wb""" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , """rb""" ) as handle:
lowerCAmelCase = pickle.load(lowerCAmelCase )
lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@require_sudachi
def __lowercase ( self : str ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , [""" """, """\t""", """ใขใใใซ""", """ในใใข""", """ใง""", """iPhone""", """8""", """ """, """ใ""", """ """, """ """, """\n """, """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ """, """ใ""", """ """, """ """] , )
@require_sudachi
def __lowercase ( self : str ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""ๅคๅฝไบบๅๆฟๆจฉ""" ) , ["""ๅคๅฝ""", """ไบบ""", """ๅๆฟ""", """ๆจฉ"""] )
@require_sudachi
def __lowercase ( self : Dict ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""ๅคๅฝไบบๅๆฟๆจฉ""" ) , ["""ๅคๅฝไบบ""", """ๅๆฟๆจฉ"""] )
@require_sudachi
def __lowercase ( self : int ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""ๅคๅฝไบบๅๆฟๆจฉ""" ) , ["""ๅคๅฝไบบๅๆฟๆจฉ"""] )
@require_sudachi
def __lowercase ( self : str ):
lowerCAmelCase = SudachiTokenizer(do_lower_case=lowerCAmelCase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , [""" """, """\t""", """ใขใใใซ""", """ในใใข""", """ใง""", """iphone""", """8""", """ """, """ใ""", """ """, """ """, """\n """, """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ """, """ใ""", """ """, """ """] , )
@require_sudachi
def __lowercase ( self : Tuple ):
lowerCAmelCase = SudachiTokenizer(normalize_text=lowerCAmelCase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , [""" """, """\t""", """๏ฝฑ๏ฝฏ๏พ๏พ๏พ""", """ในใใข""", """ใง""", """iPhone""", """๏ผ""", """ """, """ใ""", """ """, """ """, """\n """, """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """\u3000""", """ใ""", """ """, """ """] , )
@require_sudachi
def __lowercase ( self : List[Any] ):
lowerCAmelCase = SudachiTokenizer(trim_whitespace=lowerCAmelCase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซ""", """ในใใข""", """ใง""", """iPhone""", """8""", """ใ""", """็บๅฃฒ""", """ใ""", """ใ""", """ใ""", """ใ"""] , )
@require_jumanpp
def __lowercase ( self : List[Any] ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(lowerCAmelCase )
lowerCAmelCase = """ใใใซใกใฏใไธ็ใ\nใใใฐใใฏใไธ็ใ"""
lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["""ใใใซใกใฏ""", """ใ""", """ไธ็""", """ใ""", """ใใ""", """##ใฐใใฏ""", """ใ""", """ไธ็""", """ใ"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase , """wb""" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , """rb""" ) as handle:
lowerCAmelCase = pickle.load(lowerCAmelCase )
lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@require_jumanpp
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซ""", """ในใใข""", """ใง""", """iPhone""", """8""", """\u3000""", """ใ""", """\u3000""", """\u3000""", """\u3000""", """็บๅฃฒ""", """ใ""", """ใใ""", """\u3000""", """ใ"""] , )
@require_jumanpp
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซ""", """ในใใข""", """ใง""", """iphone""", """8""", """\u3000""", """ใ""", """\u3000""", """\u3000""", """\u3000""", """็บๅฃฒ""", """ใ""", """ใใ""", """\u3000""", """ใ"""] , )
@require_jumanpp
def __lowercase ( self : int ):
lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""๏ฝฑ""", """๏ฝฏ""", """๏พ""", """๏พ""", """๏พ""", """ในใใข""", """ใง""", """iPhone""", """๏ผ""", """\u3000""", """ใ""", """\u3000""", """\u3000""", """\u3000""", """็บๅฃฒ""", """ใ""", """ใใ""", """\u3000""", """ใ"""] , )
@require_jumanpp
def __lowercase ( self : Any ):
lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \t๏ฝฑ๏ฝฏ๏พ๏พ๏พในใใขใงiPhone๏ผ ใ \n ็บๅฃฒใใใใใ """ ) , ["""ใขใใใซ""", """ในใใข""", """ใง""", """iPhone""", """8""", """ใ""", """็บๅฃฒ""", """ใ""", """ใใ""", """ใ"""] , )
@require_jumanpp
def __lowercase ( self : Tuple ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ใใใใจใใใใใพใm(_ _)๏ฝ่ฆใคใใใฎใๅคงๅคใงใใ""" ) , ["""ใใใใจใ""", """ใใใใพใ""", """m(_ _)m""", """่ฆใคใใ""", """ใฎ""", """ใ""", """ๅคงๅคใงใ""", """ใ"""] , )
def __lowercase ( self : str ):
lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """ใใใซใกใฏ""", """ใใ""", """ใซใกใฏ""", """ใฐใใฏ""", """##ใใ""", """##ใซใกใฏ""", """##ใฐใใฏ"""]
lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""ใใใซใกใฏ""" ) , ["""ใใใซใกใฏ"""] )
self.assertListEqual(tokenizer.tokenize("""ใใใฐใใฏ""" ) , ["""ใใ""", """##ใฐใใฏ"""] )
self.assertListEqual(tokenizer.tokenize("""ใใใฐใใฏ ใใใฐใใซใกใฏ ใใใซใกใฏ""" ) , ["""ใใ""", """##ใฐใใฏ""", """[UNK]""", """ใใใซใกใฏ"""] )
def __lowercase ( self : Dict ):
lowerCAmelCase = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
lowerCAmelCase = tokenizer.subword_tokenizer
lowerCAmelCase = subword_tokenizer.tokenize("""ๅฝๅข ใฎ ้ทใ ใใณใใซ ใ ๆใใ ใจ ้ชๅฝ ใงใใฃใ ใ""" )
self.assertListEqual(lowerCAmelCase , ["""โๅฝๅข""", """โใฎ""", """โ้ทใ""", """โใใณใใซ""", """โใ""", """โๆใใ""", """โใจ""", """โ้ช""", """ๅฝ""", """โใงใใฃใ""", """โใ"""] )
lowerCAmelCase = subword_tokenizer.tokenize("""ใใใฐใใฏ ใใใฐใ ใซใก ใฏ ใใใซใกใฏ""" )
self.assertListEqual(lowerCAmelCase , ["""โใใ""", """ใฐใ""", """ใฏ""", """โใใ""", """ใฐใ""", """โใซ""", """ใก""", """โใฏ""", """โใใใซใกใฏ"""] )
def __lowercase ( self : str ):
lowerCAmelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
lowerCAmelCase = tokenizer.encode("""ใใใใจใใ""" , add_special_tokens=lowerCAmelCase )
lowerCAmelCase = tokenizer.encode("""ใฉใใใใใพใใฆใ""" , add_special_tokens=lowerCAmelCase )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ):
_a = BertJapaneseTokenizer
_a = False
def __lowercase ( self : Union[str, Any] ):
super().setUp()
lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """ใ""", """ใ""", """ใซ""", """ใก""", """ใฏ""", """ใฐ""", """ไธ""", """็""", """ใ""", """ใ"""]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __lowercase ( self : Optional[int] , **lowerCAmelCase : Optional[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase )
def __lowercase ( self : List[str] , lowerCAmelCase : Union[str, Any] ):
lowerCAmelCase = """ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ"""
lowerCAmelCase = """ใ ใ ใซ ใก ใฏ ใ ไธ ็ ใ ใ ใ ใฐ ใ ใฏ ใ ไธ ็ ใ"""
return input_text, output_text
def __lowercase ( self : List[Any] ):
pass # TODO add if relevant
def __lowercase ( self : Optional[Any] ):
pass # TODO add if relevant
def __lowercase ( self : int ):
pass # TODO add if relevant
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
lowerCAmelCase = tokenizer.tokenize("""ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ""" )
self.assertListEqual(
lowerCAmelCase , ["""ใ""", """ใ""", """ใซ""", """ใก""", """ใฏ""", """ใ""", """ไธ""", """็""", """ใ""", """ใ""", """ใ""", """ใฐ""", """ใ""", """ใฏ""", """ใ""", """ไธ""", """็""", """ใ"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __lowercase ( self : Any ):
lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """ใ""", """ใ""", """ใซ""", """ใก""", """ใฏ""", """ใฐ""", """ไธ""", """็""", """ใ""", """ใ"""]
lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase ):
lowerCAmelCase = i
lowerCAmelCase = CharacterTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""ใใใซใกใฏ""" ) , ["""ใ""", """ใ""", """ใซ""", """ใก""", """ใฏ"""] )
self.assertListEqual(tokenizer.tokenize("""ใใใซใกใป""" ) , ["""ใ""", """ใ""", """ใซ""", """ใก""", """[UNK]"""] )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
lowerCAmelCase = tokenizer.encode("""ใใใใจใใ""" , add_special_tokens=lowerCAmelCase )
lowerCAmelCase = tokenizer.encode("""ใฉใใใใใพใใฆใ""" , add_special_tokens=lowerCAmelCase )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = """cl-tohoku/bert-base-japanese"""
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
lowerCAmelCase = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
lowerCAmelCase = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 169
| 1
|
from ...processing_utils import ProcessorMixin
class lowercase__ ( __A ):
__UpperCamelCase = """SpeechT5FeatureExtractor"""
__UpperCamelCase = """SpeechT5Tokenizer"""
def __init__( self , _lowercase , _lowercase ):
super().__init__(_lowercase , _lowercase )
def __call__( self , *_lowercase , **_lowercase ):
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""audio""" , _lowercase )
lowerCAmelCase_ : List[Any] = kwargs.pop("""text""" , _lowercase )
lowerCAmelCase_ : Optional[int] = kwargs.pop("""text_target""" , _lowercase )
lowerCAmelCase_ : int = kwargs.pop("""audio_target""" , _lowercase )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""sampling_rate""" , _lowercase )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
lowerCAmelCase_ : Optional[int] = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
elif text is not None:
lowerCAmelCase_ : Union[str, Any] = self.tokenizer(_lowercase , **_lowercase )
else:
lowerCAmelCase_ : Dict = None
if audio_target is not None:
lowerCAmelCase_ : Dict = self.feature_extractor(audio_target=_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
lowerCAmelCase_ : str = targets["""input_values"""]
elif text_target is not None:
lowerCAmelCase_ : str = self.tokenizer(_lowercase , **_lowercase )
lowerCAmelCase_ : List[str] = targets["""input_ids"""]
else:
lowerCAmelCase_ : Dict = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase_ : int = labels
lowerCAmelCase_ : List[Any] = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase_ : Tuple = decoder_attention_mask
return inputs
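    # Usage sketch (keyword names follow the kwargs handled above; the waveform
    # variable and sampling rate are illustrative):
    #   processor(audio=waveform, sampling_rate=16000)   -> encoder input features
    #   processor(text="hello", text_target="world")     -> tokenized inputs plus labels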
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ):
lowerCAmelCase_ : List[Any] = kwargs.pop("""input_values""" , _lowercase )
lowerCAmelCase_ : Tuple = kwargs.pop("""input_ids""" , _lowercase )
lowerCAmelCase_ : str = kwargs.pop("""labels""" , _lowercase )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
lowerCAmelCase_ : Optional[int] = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
elif input_ids is not None:
lowerCAmelCase_ : Tuple = self.tokenizer.pad(_lowercase , **_lowercase )
else:
lowerCAmelCase_ : Optional[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowercase , _lowercase ) and "input_ids" in labels[0]):
lowerCAmelCase_ : Union[str, Any] = self.tokenizer.pad(_lowercase , **_lowercase )
lowerCAmelCase_ : List[str] = targets["""input_ids"""]
else:
lowerCAmelCase_ : Optional[Any] = self.feature_extractor.feature_size
lowerCAmelCase_ : Optional[int] = self.feature_extractor.num_mel_bins
lowerCAmelCase_ : List[str] = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
lowerCAmelCase_ : Union[str, Any] = feature_size_hack
lowerCAmelCase_ : Any = targets["""input_values"""]
else:
lowerCAmelCase_ : str = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase_ : Dict = labels
lowerCAmelCase_ : str = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase_ : Any = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
| 440
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowercase__ ( __A ):
__UpperCamelCase = """perceiver"""
def __init__( self , _lowercase=256 , _lowercase=1_280 , _lowercase=768 , _lowercase=1 , _lowercase=26 , _lowercase=8 , _lowercase=8 , _lowercase=None , _lowercase=None , _lowercase="kv" , _lowercase=1 , _lowercase=1 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=True , _lowercase=262 , _lowercase=2_048 , _lowercase=56 , _lowercase=[368, 496] , _lowercase=16 , _lowercase=1_920 , _lowercase=16 , _lowercase=[1, 16, 224, 224] , **_lowercase , ):
super().__init__(**_lowercase )
lowerCAmelCase_ : Optional[int] = num_latents
lowerCAmelCase_ : List[str] = d_latents
lowerCAmelCase_ : int = d_model
lowerCAmelCase_ : Dict = num_blocks
lowerCAmelCase_ : Union[str, Any] = num_self_attends_per_block
lowerCAmelCase_ : List[str] = num_self_attention_heads
lowerCAmelCase_ : List[str] = num_cross_attention_heads
lowerCAmelCase_ : List[Any] = qk_channels
lowerCAmelCase_ : Optional[Any] = v_channels
lowerCAmelCase_ : Optional[Any] = cross_attention_shape_for_attention
lowerCAmelCase_ : Optional[int] = self_attention_widening_factor
lowerCAmelCase_ : int = cross_attention_widening_factor
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : int = attention_probs_dropout_prob
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Optional[Any] = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = use_query_residual
# masked language modeling attributes
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : List[Any] = max_position_embeddings
# image classification attributes
lowerCAmelCase_ : List[Any] = image_size
# flow attributes
lowerCAmelCase_ : Dict = train_size
# multimodal autoencoding attributes
lowerCAmelCase_ : Optional[Any] = num_frames
lowerCAmelCase_ : int = audio_samples_per_frame
lowerCAmelCase_ : Any = samples_per_patch
lowerCAmelCase_ : Any = output_shape
class lowercase__ ( __A ):
@property
def UpperCAmelCase__ ( self ):
if self.task == "multiple-choice":
lowerCAmelCase_ : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def UpperCAmelCase__ ( self ):
return 1e-4
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , _lowercase = 3 , _lowercase = 40 , _lowercase = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_lowercase , _lowercase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : Union[str, Any] = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ : int = preprocessor.num_special_tokens_to_add(_lowercase )
lowerCAmelCase_ : int = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ : Optional[int] = [""" """.join(["""a"""] ) * seq_length] * batch_size
lowerCAmelCase_ : Optional[Any] = dict(preprocessor(_lowercase , return_tensors=_lowercase ) )
lowerCAmelCase_ : Optional[Any] = inputs.pop("""input_ids""" )
return inputs
elif isinstance(_lowercase , _lowercase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : Any = compute_effective_axis_dimension(_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch )
lowerCAmelCase_ : str = self._generate_dummy_images(_lowercase , _lowercase , _lowercase , _lowercase )
lowerCAmelCase_ : List[str] = dict(preprocessor(images=_lowercase , return_tensors=_lowercase ) )
lowerCAmelCase_ : Tuple = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 440
| 1
|
from pathlib import Path
import numpy as np
from PIL import Image
def _a ( lowercase__ : np.ndarray ):
'''simple docstring'''
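    # Weighted sum of the R, G, B channels using the ITU-R BT.601 luma coefficients.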
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def _a ( lowercase__ : np.ndarray ):
'''simple docstring'''
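    # Threshold the grayscale image at 127 to obtain a boolean foreground mask.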
return (gray > 1_27) & (gray <= 2_55)
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ):
'''simple docstring'''
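    # Morphological dilation: a pixel of the zero-padded image becomes foreground
    # whenever the structuring element overlaps at least one foreground pixel.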
SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros_like(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE__ : Optional[Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE__ : List[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE__ : List[str] = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ : int = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
SCREAMING_SNAKE_CASE__ : int = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 85
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
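# Lazy import table: submodules are only imported when one of their symbols is
# first accessed, keeping the package import cheap when torch/vision are absent.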
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 713
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ : List[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
| 647
| 0
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
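    # Decorator factory: depending on the benchmark arguments, the wrapped model
    # call runs either eagerly or as a tf.function (optionally XLA-compiled via
    # experimental_compile).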
def run_func(lowerCamelCase ):
@wraps(lowerCamelCase )
def run_in_eager_mode(*lowerCamelCase , **lowerCamelCase ):
return func(*lowerCamelCase , **lowerCamelCase )
@wraps(lowerCamelCase )
@tf.function(experimental_compile=lowerCamelCase )
def run_in_graph_mode(*lowerCamelCase , **lowerCamelCase ):
return func(*lowerCamelCase , **lowerCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
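    # Build a (batch_size, sequence_length) tensor of uniformly random token ids
    # in [0, vocab_size) to feed the benchmarked model.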
__lowercase = random.Random()
__lowercase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :TensorFlowBenchmarkArguments
__snake_case :PretrainedConfig
__snake_case :str = "TensorFlow"
@property
def _a ( self : str ) -> Any:
"""simple docstring"""
return tf.__version__
def _a ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> float:
"""simple docstring"""
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowercase = self._prepare_inference_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_speed(_inference )
def _a ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> float:
"""simple docstring"""
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowercase = self._prepare_train_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_speed(_train )
def _a ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCAmelCase )
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowercase = self._prepare_inference_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_memory(_inference )
def _a ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCAmelCase )
__lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowercase = self._prepare_train_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_memory(_train )
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priority levels; 0 is the highest priority."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the smallest element is dequeued first."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
"""Tokenization classes for XLNet model."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
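
# Usage sketch (hedged): XLNet appends its special tokens rather than prepending
# them, so `<sep>` and `<cls>` land at the end of the encoded sequence.
#
# >>> tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# >>> ids = tokenizer("Hello world")["input_ids"]
# >>> tokenizer.convert_ids_to_tokens(ids)[-2:]
# ['<sep>', '<cls>']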
"""simple docstring"""
class a :
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = set_counts
lowerCAmelCase = max(_snake_case )
lowerCAmelCase = len(_snake_case )
lowerCAmelCase = [1] * num_sets
lowerCAmelCase = list(range(_snake_case ) )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.get_parent(_snake_case )
lowerCAmelCase = self.get_parent(_snake_case )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowerCAmelCase = 0
lowerCAmelCase = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowerCAmelCase = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowerCAmelCase = 0
lowerCAmelCase = src_parent
lowerCAmelCase = self.set_counts[src_parent]
lowerCAmelCase = max(self.max_set , _snake_case )
return True
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if self.parents[disj_set] == disj_set:
return disj_set
lowerCAmelCase = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
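
# Quick check of the merge/size bookkeeping (names follow the class as written above):
#
# >>> ds = DisjointSet([1, 1, 1])
# >>> ds.merge(0, 1)
# True
# >>> ds.merge(1, 2)
# True
# >>> ds.max_set
# 3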
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
# flake8: noqa
# Lint as: python3
SCREAMING_SNAKE_CASE__ : Dict =[
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] ={
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """sew-d"""
def __init__( self , _lowercase=32 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase=2 , _lowercase=512 , _lowercase=256 , _lowercase=True , _lowercase=True , _lowercase=("p2c", "c2p") , _lowercase="layer_norm" , _lowercase="gelu_python" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=1E-7 , _lowercase=1E-5 , _lowercase="group" , _lowercase="gelu" , _lowercase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowercase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowercase=False , _lowercase=128 , _lowercase=16 , _lowercase=True , _lowercase=0.05 , _lowercase=10 , _lowercase=2 , _lowercase=0.0 , _lowercase=10 , _lowercase=0 , _lowercase="mean" , _lowercase=False , _lowercase=False , _lowercase=256 , _lowercase=0 , _lowercase=1 , _lowercase=2 , **_lowercase , ) -> str:
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : str = feat_extract_norm
_lowerCamelCase : int = feat_extract_activation
_lowerCamelCase : Optional[int] = list(_lowercase )
_lowerCamelCase : Any = list(_lowercase )
_lowerCamelCase : Dict = list(_lowercase )
_lowerCamelCase : List[Any] = conv_bias
_lowerCamelCase : Dict = num_conv_pos_embeddings
_lowerCamelCase : Optional[int] = num_conv_pos_embedding_groups
_lowerCamelCase : Dict = len(self.conv_dim )
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Optional[int] = squeeze_factor
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : Any = position_buckets
_lowerCamelCase : str = share_att_key
_lowerCamelCase : Optional[int] = relative_attention
_lowerCamelCase : Tuple = norm_rel_ebd
_lowerCamelCase : Union[str, Any] = list(_lowercase )
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : str = hidden_dropout
_lowerCamelCase : int = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Union[str, Any] = feat_proj_dropout
_lowerCamelCase : int = final_dropout
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Dict = feature_layer_norm_eps
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : str = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Union[str, Any] = apply_spec_augment
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[str] = mask_time_min_masks
_lowerCamelCase : Optional[int] = mask_feature_prob
_lowerCamelCase : List[str] = mask_feature_length
_lowerCamelCase : int = mask_feature_min_masks
# ctc loss
_lowerCamelCase : int = ctc_loss_reduction
_lowerCamelCase : List[Any] = ctc_zero_infinity
# sequence classification
_lowerCamelCase : Optional[int] = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
@property
def a__ ( self ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
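
# `inputs_to_logits_ratio` is simply the product of the conv strides, i.e. the overall
# downsampling factor of the feature extractor. With the defaults above:
#
# >>> SEWDConfig().inputs_to_logits_ratio
# 320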
"""simple docstring"""
import os
def _lowercase ( __snake_case = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) as input_file:
__lowerCAmelCase : Union[str, Any] = [
[int(__snake_case ) for element in line.split("," )]
for line in input_file.readlines()
]
__lowerCAmelCase : Optional[Any] = len(__snake_case )
__lowerCAmelCase : List[str] = len(matrix[0] )
__lowerCAmelCase : Union[str, Any] = [[-1 for _ in range(__snake_case )] for _ in range(__snake_case )]
for i in range(__snake_case ):
__lowerCAmelCase : Dict = matrix[i][0]
for j in range(1 ,__snake_case ):
for i in range(__snake_case ):
__lowerCAmelCase : List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 ,__snake_case ):
__lowerCAmelCase : Any = min(
minimal_path_sums[i][j] ,minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 ,-1 ,-1 ):
__lowerCAmelCase : str = min(
minimal_path_sums[i][j] ,minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> Tuple:
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
__lowerCAmelCase : Optional[Any] = (boundary[1] - boundary[0]) / steps
__lowerCAmelCase : List[Any] = boundary[0]
__lowerCAmelCase : Any = boundary[1]
__lowerCAmelCase : Dict = make_points(__snake_case ,__snake_case ,__snake_case )
__lowerCAmelCase : List[str] = 0.0
y += (h / 2.0) * f(__snake_case )
for i in x_i:
# print(i)
y += h * f(__snake_case )
y += (h / 2.0) * f(__snake_case )
return y
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Union[str, Any]:
__lowerCAmelCase : int = a + h
while x < (b - h):
yield x
__lowerCAmelCase : List[str] = x + h
def _lowercase ( __snake_case ) -> Tuple: # enter your function here
__lowerCAmelCase : int = (x - 0) * (x - 0)
return y
def _lowercase ( ) -> int:
__lowerCAmelCase : Tuple = 0.0 # Lower bound of integration
__lowerCAmelCase : Union[str, Any] = 1.0 # Upper bound of integration
__lowerCAmelCase : Dict = 10.0 # define number of steps or resolution
__lowerCAmelCase : Optional[Any] = [a, b] # define boundary of integration
__lowerCAmelCase : Optional[int] = method_a(__snake_case ,__snake_case )
print(F"""y = {y}""" )
if __name__ == "__main__":
main()
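
# Sanity check: for f(x) = x**2 on [0, 1] the exact integral is 1/3, and the 10-step
# trapezoidal estimate printed by main() should land near 0.335 (error of order h**2).
#
# >>> round(trapezoidal_rule([0.0, 1.0], 10.0), 3)
# 0.335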
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment tree over `arr`, combining elements with the associative function `fnc`."""
        any_type: Any | None = None

        self.N: int = len(arr)
        # Implicit binary-heap layout: leaves live at indices N..2N-1.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every (i, j) range query against a direct reduce over the slice."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
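
# Round-trip sketch (hedged): with the single default down/up block the spatial size
# should be preserved, so a 3x32x32 input comes back as a 3x32x32 reconstruction.
#
# >>> import torch
# >>> model = VQModel()
# >>> out = model(torch.randn(1, 3, 32, 32)).sample
# >>> tuple(out.shape)
# (1, 3, 32, 32)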
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
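
# Example invocation (script name and paths are placeholders, not real files):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin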
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
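
# Round trip, with the expected output computed from the ASCII codes:
#
# >>> base16_encode(b"Hello World!")
# '48656C6C6F20576F726C6421'
# >>> base16_decode("48656C6C6F20576F726C6421")
# b'Hello World!'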
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , ) -> Dict:
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def lowerCamelCase_ ( self ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = DPTImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def lowerCamelCase_ ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase_ ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase_ ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
"""simple docstring"""
def UpperCAmelCase ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not len(A ) == len(A ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
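
# Worked example: x + 2y = 5 and 3x - y = 1 intersect at (1, 2):
#
# >>> cramers_rule_2x2([1, 2, 5], [3, -1, 1])
# (1.0, 2.0)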
"""Convert between lowercase letters and their 1-26 alphabet positions (A1Z26 cipher)."""
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
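
# Round trip on a lowercase word:
#
# >>> encode("marvin")
# [13, 1, 18, 22, 9, 14]
# >>> decode([13, 1, 18, 22, 9, 14])
# 'marvin'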
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
lowerCamelCase_ : Tuple = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
lowerCamelCase_ : str = ap.parse_args()
lowerCamelCase_ : List[str] = Path(args.readme_filepath)
lowerCamelCase_ : Tuple = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ : List[Any] = 256
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = ["melgan"]
def __init__( self : Dict , _snake_case : SpectrogramNotesEncoder , _snake_case : SpectrogramContEncoder , _snake_case : TaFilmDecoder , _snake_case : DDPMScheduler , _snake_case : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
A_ = math.log(1e-5 ) # Matches MelGAN training.
A_ = 4.0 # Largest value for most examples
A_ = 128
self.register_modules(
notes_encoder=_snake_case , continuous_encoder=_snake_case , decoder=_snake_case , scheduler=_snake_case , melgan=_snake_case , )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : Tuple , _snake_case : str=(-1.0, 1.0) , _snake_case : int=False ) -> str:
"""simple docstring"""
A_ , A_ = output_range
if clip:
A_ = torch.clip(_snake_case , self.min_value , self.max_value )
# Scale to [0, 1].
A_ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowerCamelCase__ ( self : Dict , _snake_case : Tuple , _snake_case : Optional[Any]=(-1.0, 1.0) , _snake_case : List[str]=False ) -> List[str]:
"""simple docstring"""
A_ , A_ = input_range
A_ = torch.clip(_snake_case , _snake_case , _snake_case ) if clip else outputs
# Scale to [0, 1].
A_ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : str , _snake_case : List[Any] , _snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ = input_tokens > 0
A_ , A_ = self.notes_encoder(
encoder_input_tokens=_snake_case , encoder_inputs_mask=_snake_case )
A_ , A_ = self.continuous_encoder(
encoder_inputs=_snake_case , encoder_inputs_mask=_snake_case )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowerCamelCase__ ( self : List[Any] , _snake_case : List[Any] , _snake_case : int , _snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
A_ = noise_time
if not torch.is_tensor(_snake_case ):
A_ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_snake_case ) and len(timesteps.shape ) == 0:
A_ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
A_ = self.decoder(
encodings_and_masks=_snake_case , decoder_input_tokens=_snake_case , decoder_noise_time=_snake_case )
return logits
@torch.no_grad()
    def __call__( self : List[Any] , input_tokens : List[List[int]] , generator : Optional[torch.Generator] = None , num_inference_steps : int = 100 , return_dict : bool = True , output_type : str = "numpy" , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """simple docstring"""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(callback_steps )}.' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info("Generated segment" , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
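# A minimal usage sketch for this pipeline (hedged: the checkpoint id and the
# token-chunk layout are illustrative; the public diffusers entry point this
# file corresponds to is SpectrogramDiffusionPipeline plus a MIDI converter):
#
#   from diffusers import SpectrogramDiffusionPipeline
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
#   audio = output.audios[0]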
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
            encapsulated by [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"`
            and `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label
            smoothing in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has
            to be set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC )
class RagConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "rag"
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , do_marginalize=False , output_retrieved=False , exclude_bos_score=False , use_cache=True , forced_eos_token_id=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('question_encoder' )
        question_encoder_model_type = question_encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('generator' )
        decoder_model_type = decoder_config.pop('model_type' )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , 'forced_eos_token_id' , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config , generator_config , **kwargs ):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
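# Minimal construction sketch (assuming the usual public transformers classes;
# the sub-configs travel as dicts under the "question_encoder" and "generator"
# keys, which is what the assert in __init__ checks):
#
#   from transformers import DPRConfig, BartConfig, RagConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#   )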
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
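# The _LazyModule above defers importing the heavy torch-backed submodules
# until an attribute is first touched. A small illustration of the access
# pattern (assuming the standard transformers package layout):
#
#   import importlib
#   trocr = importlib.import_module("transformers.models.trocr")
#   config_cls = trocr.TrOCRConfig  # attribute access triggers the lazy load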
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset( key, offset, original_name, new_name ):
    """simple docstring"""
    to_find = original_name.split('.' )[0]
    key_list = key.split('.' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''', F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
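# A small worked example of the index arithmetic above (hypothetical key,
# offset=1): for key "1.0.mlp.fc1.weight" with original_name="mlp.fc1" and
# new_name="output.conv1", to_find is "mlp", orig_block_num=1, layer_num=0,
# so the key becomes "block.0.0.output.conv1.weight".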
def rename_keys( state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network' ):
            key = key.replace('network', 'poolformer.encoder' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj' )]
            key = key.replace(to_replace, F'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace('proj', 'projection' )
            if key.endswith('bias' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2' )
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm' )
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2' )
        if "head" in key:
            key = key.replace('head', 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name, checkpoint_path, pytorch_dump_folder_path ):
    """simple docstring"""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt' ).pixel_values
    logger.info(F'''Converting model {model_name}...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2 )
    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
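# Example invocation (the paths below are placeholders; the .pth checkpoint
# must come from the original PoolFormer release):
#
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth \
#       --pytorch_dump_folder_path ./poolformer_s12_hf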
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class MaMaaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return (
            "This is a test",
            "This is a test",
        )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '</s>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '<s>' )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , 'This is a test' )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
UpperCamelCase = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = """facebook/m2m100_418M"""
    src_text = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    tgt_text = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass( cls ):
        """simple docstring"""
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
        cls.pad_token_id = 1
        return cls
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['<unk>'] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('en' ) , vocab )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        self.tokenizer.src_lang = 'en'
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
        self.tokenizer.src_lang = 'en'
        self.tokenizer.tgt_lang = 'fr'
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch['decoder_input_ids'] = shift_tokens_right(
            batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        self.tokenizer.src_lang = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
        self.tokenizer.tgt_lang = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        inputs = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'input_ids': [[12_8022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_8006,
} , )
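# End-to-end usage sketch mirroring what these tests exercise (the checkpoint
# id comes from the tests above; the generate() recipe follows the documented
# public M2M100 translation API):
#
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer("NSA Affair Emphasizes Complete Lack of Debate on Intelligence", return_tensors="pt")
#   generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))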
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'data2vec-vision'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ) -> Any:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1E-4
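# Minimal usage sketch (Data2VecVisionConfig is exposed at the transformers
# top level; the defaults above mirror a ViT-Base layout):
#
#   from transformers import Data2VecVisionConfig
#   config = Data2VecVisionConfig()
#   print(config.hidden_size, config.image_size, config.patch_size)  # 768 224 16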
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one( i ) -> int:  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase_ = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one ,lowerCamelCase_ ,num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one ,lowerCamelCase_ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' ,[2, -1] )
def test_map_nested_in_parallel( num_proc ):
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark' ):
        assert map_nested(add_one ,s1 ,num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one ,s2 ,num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one ,s3 ,num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one ,s4 ,num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one ,s5 ,num_proc=num_proc ) == expected_map_nested_s5
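# Usage sketch distilled from the tests above (requires joblibspark and an
# active Spark session; the mapped function must be picklable, hence the
# module-level add_one rather than a lambda):
#
#   from datasets.parallel import parallel_backend
#   from datasets.utils.py_utils import map_nested
#
#   with parallel_backend("spark"):
#       map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)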
"""simple docstring"""
def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : int ):
"""simple docstring"""
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(__snake_case , __snake_case ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
_lowerCamelCase : Tuple = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
_lowerCamelCase : str = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
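# Worked example (illustrative figures, not from the original file): borrowing
# 25_000 at a 12% annual rate over 3 years gives a monthly rate of 0.01 and 36
# payments, so the installment is 25000 * 0.01 * 1.01**36 / (1.01**36 - 1),
# i.e. roughly 830.36. Using the function above (kept under its obfuscated name):
#
#   round(_snake_case(25_000, 0.12, 3), 2)  # -> 830.36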
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from datetime import datetime
import requests
def download_video( url: str ) -> bytes:
    '''simple docstring'''
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
    return requests.get(video_url ).content
if __name__ == "__main__":
__A = input('''Enter Video/IGTV url: ''').strip()
__A = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : List[str] ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
def lowerCamelCase__ ( self : int ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Any ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase__ ( self : Any ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase__ ( self : Union[str, Any] ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
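# Minimal stand-alone sketch of what these tests assert (LevitImageProcessor
# is the public transformers class; the input image here is synthetic):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import LevitImageProcessor
#
#   processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values  # shape (1, 3, 18, 18)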
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> Any:
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def snake_case_( self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""albert-base-v2""" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case_( self ) -> Any:
        model = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) -> Dict:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
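# Usage sketch showing the attribute_map aliasing declared above (standard
# PretrainedConfig behavior: BERT-style names resolve to DistilBERT's fields):
#
#   from transformers import DistilBertConfig
#   config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=2048)
#   print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 4 8 512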
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" ,["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" ,["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" ,[None, "v2"] )
def lowercase ( repo_id ,path ,revision ) -> List[Any]:
    url = hf_hub_url(repo_id=repo_id ,path=path ,revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
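# Concrete instance of the pattern asserted above (pure string formatting, no
# network access; quote() percent-encodes the blanks):
#
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#   # -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"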
class SubArray :
    def __init__( self , arr ):
        """simple docstring"""
        self.array = arr.split("," )
    def solve_sub_array( self ):
        """simple docstring"""
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
_lowerCAmelCase = input("""please input some numbers:""")
_lowerCAmelCase = SubArray(whole_array)
_lowerCAmelCase = array.solve_sub_array()
print(("""the results is:""", re))
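# Worked example (illustrative input): for "1,-2,3,4" the two recurrences give
#   sum_value = [1, -1, 3, 7]   # best sum of a subarray ending at each index
#   rear      = [1,  1, 3, 7]   # best sum seen so far
# so SubArray("1,-2,3,4").solve_sub_array() == 7, the classic Kadane
# maximum-subarray answer.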
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs( self ) -> Tuple:
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Tuple:
        '''simple docstring'''
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model( self , config , pixel_values , labels ) -> Optional[Any]:
        '''simple docstring'''
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification( self , config , pixel_values , labels ) -> Dict:
        '''simple docstring'''
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape , expected_shape)
    def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
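# Shape arithmetic implied by the tester defaults above: image_size=10 and
# patch_size=2 give (10 // 2) ** 2 = 25 patches per frame; with num_frames=2,
# seq_length = 2 * 25 + 1 = 51, and each attention map is
# seq_length // num_frames + 1 = 26 tokens on a side, matching the assertions
# at the end of this file.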
@require_torch
class TimesformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TimesformerModelTester(self)
_UpperCamelCase = ConfigTester(
self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self , __a , __a , __a=False) -> List[str]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(__a)
if return_labels:
if model_class in get_values(__a):
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a)
return inputs_dict
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__a)
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TimesformerModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = True
for model_class in self.all_model_classes:
_UpperCamelCase = self.model_tester.seq_length
_UpperCamelCase = self.model_tester.num_frames
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase = True
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_hidden_layers)
                # each attention tensor has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_UpperCamelCase = len(__a)
# Check attention is always last and order is fine
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
self.assertEqual(out_len + 1 , len(__a))
_UpperCamelCase = outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_hidden_layers)
                # each attention tensor has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a):
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__a) , __a)
_UpperCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
_UpperCamelCase = np.load(__snake_case )
return list(__snake_case )
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''').to(
__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_video()
_UpperCamelCase = image_processor(video[:8] , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 4_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([-0.3016, -0.7713, -0.4205]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
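# Minimal end-to-end inference sketch mirroring the integration test above.
# The checkpoint name and the 8-frame clip length come from the test itself;
# the random frames below are a stand-in for a real video (so the predicted
# label is meaningless), and running this needs network access to download
# the checkpoint.
if __name__ == "__main__":
    import numpy as np
    import torch
    from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

    processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
    video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
    inputs = processor(video, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, 400) logits over the Kinetics-400 classes
    print(model.config.id2label[int(logits.argmax(-1))])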
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Tuple , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase_ )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCAmelCase_ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__lowerCAmelCase = getattr(lowerCAmelCase_ , 'use_pretrained_backbone' , lowerCAmelCase_ )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(lowerCAmelCase_ , 'out_indices' , lowerCAmelCase_ ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=lowerCAmelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase_ , **lowerCAmelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCAmelCase = self._backbone.return_layers
        __lowerCAmelCase = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : int , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCAmelCase = kwargs.pop('config' , TimmBackboneConfig() )
__lowerCAmelCase = kwargs.pop('use_timm_backbone' , lowerCAmelCase_ )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
__lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
__lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , features_only=lowerCAmelCase_ , use_pretrained_backbone=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , )
return super()._from_config(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
pass
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Dict ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCAmelCase = self._all_layers
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = self._return_layers
__lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = None
__lowerCAmelCase = tuple(lowerCAmelCase_ )
__lowerCAmelCase = tuple(lowerCAmelCase_ ) if hidden_states is not None else None
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
__lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , attentions=lowerCAmelCase_ )
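# Minimal usage sketch for the wrapper above (upstream: transformers'
# TimmBackbone). It is normally built through AutoBackbone with
# use_timm_backbone=True, which routes into the classmethod above;
# "resnet18" is an assumed timm model name (any entry of timm.list_models()
# should work), and use_pretrained_backbone=False avoids a weight download.
if __name__ == "__main__":
    import torch
    from transformers import AutoBackbone

    backbone = AutoBackbone.from_pretrained(
        "resnet18", use_timm_backbone=True, use_pretrained_backbone=False
    )
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = backbone(pixel_values)
    # with the default out_indices=(-1,), only the last stage's feature map is returned
    print([tuple(fm.shape) for fm in outputs.feature_maps])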
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
snake_case_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
snake_case_ = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
snake_case_ = """โ"""
class a__ ( _lowercase ):
__magic_name__ : int = VOCAB_FILES_NAMES
__magic_name__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__(self : int, __UpperCAmelCase : Any, __UpperCAmelCase : List[str]="<s>", __UpperCAmelCase : int="</s>", __UpperCAmelCase : Optional[Any]="</s>", __UpperCAmelCase : int="<s>", __UpperCAmelCase : str="<unk>", __UpperCAmelCase : str="<pad>", __UpperCAmelCase : Dict="<mask>", __UpperCAmelCase : Optional[Dict[str, Any]] = None, **__UpperCAmelCase : int, ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(__UpperCAmelCase, lstrip=__UpperCAmelCase, rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase, __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase, eos_token=__UpperCAmelCase, unk_token=__UpperCAmelCase, sep_token=__UpperCAmelCase, cls_token=__UpperCAmelCase, pad_token=__UpperCAmelCase, mask_token=__UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **__UpperCAmelCase, )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
SCREAMING_SNAKE_CASE : int = len(self.sp_model ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowercase__ (self : List[str], __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ (self : List[str], __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None, __UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase, token_ids_a=__UpperCAmelCase, already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowercase__ (self : Tuple, __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ (self : Tuple ) -> Dict:
"""simple docstring"""
return len(self.sp_model )
def lowercase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ (self : List[str], __UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(__UpperCAmelCase, out_type=__UpperCAmelCase )
def lowercase__ (self : int, __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.PieceToId(__UpperCAmelCase )
return spm_id if spm_id else self.unk_token_id
def lowercase__ (self : Optional[Any], __UpperCAmelCase : str ) -> int:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__UpperCAmelCase )
def lowercase__ (self : Any, __UpperCAmelCase : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : str = ''''''
SCREAMING_SNAKE_CASE : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : int = []
else:
current_sub_tokens.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def __getstate__(self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : List[str] = None
return state
def __setstate__(self : Tuple, __UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : str, __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
__UpperCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase, '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
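# Minimal usage sketch for the tokenizer above (upstream: transformers'
# BarthezTokenizer, which requires the sentencepiece package). The checkpoint
# name is taken from the pretrained vocab map at the top of this module;
# loading it needs network access, and the French sample sentence is just an
# illustrative assumption.
if __name__ == "__main__":
    from transformers import BarthezTokenizer

    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    encoding = tokenizer("Le camembert est excellent.")
    print(encoding["input_ids"])
    print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))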
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class a__ :
def __init__(self : Any, __UpperCAmelCase : int, __UpperCAmelCase : Optional[Any], __UpperCAmelCase : List[Any], __UpperCAmelCase : Optional[int], __UpperCAmelCase : int, __UpperCAmelCase : List[str]=0.2, __UpperCAmelCase : Dict=0.2 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = bp_numa
SCREAMING_SNAKE_CASE : Optional[Any] = bp_numa
SCREAMING_SNAKE_CASE : str = bp_numa
SCREAMING_SNAKE_CASE : Dict = conva_get[:2]
SCREAMING_SNAKE_CASE : Union[str, Any] = conva_get[2]
SCREAMING_SNAKE_CASE : int = size_pa
SCREAMING_SNAKE_CASE : int = rate_w
SCREAMING_SNAKE_CASE : int = rate_t
SCREAMING_SNAKE_CASE : int = [
np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : Dict = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(__UpperCAmelCase, '''wb''' ) as f:
pickle.dump(__UpperCAmelCase, __UpperCAmelCase )
        print(F'''Model saved: {save_path}''' )
@classmethod
def lowercase__ (cls : str, __UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
with open(__UpperCAmelCase, '''rb''' ) as f:
SCREAMING_SNAKE_CASE : int = pickle.load(__UpperCAmelCase ) # noqa: S301
SCREAMING_SNAKE_CASE : List[str] = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE : Dict = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE : Tuple = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE : List[str] = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE : Any = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE : Any = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE : List[Any] = CNN(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# modify model parameter
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE : Tuple = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE : Any = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE : List[str] = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ (self : Optional[Any], __UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ (self : Optional[int], __UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return round(__UpperCAmelCase, 3 )
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : Union[str, Any], __UpperCAmelCase : Optional[Any], __UpperCAmelCase : Optional[int], __UpperCAmelCase : Any, __UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = convs[0]
SCREAMING_SNAKE_CASE : int = convs[1]
SCREAMING_SNAKE_CASE : List[str] = np.shape(__UpperCAmelCase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i_focus in range(0, size_data - size_conv + 1, __UpperCAmelCase ):
for j_focus in range(0, size_data - size_conv + 1, __UpperCAmelCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCAmelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = []
for i_focus in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE : Any = (
np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Dict = np.asmatrix(__UpperCAmelCase ).reshape(
__UpperCAmelCase, __UpperCAmelCase )
data_featuremap.append(__UpperCAmelCase )
        # expanding the data slice to one dimension
SCREAMING_SNAKE_CASE : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(__UpperCAmelCase )
return focus_list, data_featuremap
def lowercase__ (self : Any, __UpperCAmelCase : Optional[int], __UpperCAmelCase : Tuple, __UpperCAmelCase : List[str]="average_pool" ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE : str = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE : Tuple = []
for i_map in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE : str = featuremaps[i_map]
SCREAMING_SNAKE_CASE : Any = []
for i_focus in range(0, __UpperCAmelCase, __UpperCAmelCase ):
for j_focus in range(0, __UpperCAmelCase, __UpperCAmelCase ):
SCREAMING_SNAKE_CASE : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Tuple = np.asmatrix(__UpperCAmelCase ).reshape(__UpperCAmelCase, __UpperCAmelCase )
featuremap_pooled.append(__UpperCAmelCase )
return featuremap_pooled
def lowercase__ (self : Any, __UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE : Union[str, Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE : str = data[i].reshape(1, shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE : int = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(__UpperCAmelCase )
return data_expanded
def lowercase__ (self : Any, __UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = np.asarray(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = np.shape(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = data_mat.reshape(1, shapes[0] * shapes[1] )
return data_expanded
def lowercase__ (self : Tuple, __UpperCAmelCase : List[Any], __UpperCAmelCase : Dict, __UpperCAmelCase : Any, __UpperCAmelCase : Optional[int], __UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[str] = 0
for i_map in range(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE : Dict = np.ones((size_map, size_map) )
for i in range(0, __UpperCAmelCase, __UpperCAmelCase ):
for j in range(0, __UpperCAmelCase, __UpperCAmelCase ):
SCREAMING_SNAKE_CASE : str = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE : Dict = i_pool + 1
SCREAMING_SNAKE_CASE : List[Any] = np.multiply(
__UpperCAmelCase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
pd_all.append(__UpperCAmelCase )
return pd_all
def lowercase__ (self : Optional[int], __UpperCAmelCase : Optional[int], __UpperCAmelCase : Union[str, Any], __UpperCAmelCase : List[Any], __UpperCAmelCase : List[str], __UpperCAmelCase : Union[str, Any], __UpperCAmelCase : Optional[int]=bool ) -> List[Any]:
"""simple docstring"""
print('''----------------------Start Training-------------------------''' )
        print(''' - - Shape: Train_Data ''', np.shape(__UpperCAmelCase ) )
        print(''' - - Shape: Teach_Data ''', np.shape(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE : int = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE : Tuple = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.convolute(
__UpperCAmelCase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
SCREAMING_SNAKE_CASE : Dict = self.pooling(__UpperCAmelCase, self.size_poolinga )
SCREAMING_SNAKE_CASE : Dict = np.shape(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : str = self._expand(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = data_bp_input
SCREAMING_SNAKE_CASE : Tuple = np.dot(__UpperCAmelCase, self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : List[Any] = self.sig(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = np.dot(__UpperCAmelCase, self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : Optional[Any] = self.sig(__UpperCAmelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE : Union[str, Any] = np.multiply(
(data_teach - bp_outa), np.multiply(__UpperCAmelCase, (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : List[Any] = np.multiply(
np.dot(__UpperCAmelCase, self.wkj ), np.multiply(__UpperCAmelCase, (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : Dict = np.dot(__UpperCAmelCase, self.vji )
SCREAMING_SNAKE_CASE : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE : Optional[Any] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE : Optional[Any] = self._calculate_gradient_from_pool(
__UpperCAmelCase, __UpperCAmelCase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE : Any = self.rate_weight * np.dot(__UpperCAmelCase, __UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE : int = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
SCREAMING_SNAKE_CASE : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE : Union[str, Any] = rp + 1
SCREAMING_SNAKE_CASE : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCAmelCase )
def draw_error():
SCREAMING_SNAKE_CASE : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCAmelCase, '''+-''' )
plt.plot(__UpperCAmelCase, '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(__UpperCAmelCase, alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
        print(''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''' )
if draw_e:
draw_error()
return mse
def lowercase__ (self : List[str], __UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
print('''-------------------Start Testing-------------------------''' )
        print(''' - - Shape: Test_Data ''', np.shape(__UpperCAmelCase ) )
for p in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.convolute(
__UpperCAmelCase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
SCREAMING_SNAKE_CASE : List[Any] = self.pooling(__UpperCAmelCase, self.size_poolinga )
SCREAMING_SNAKE_CASE : str = self._expand(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = data_bp_input
SCREAMING_SNAKE_CASE : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE : Dict = self.sig(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE : Optional[Any] = self.sig(__UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE : List[str] = [list(map(self.do_round, __UpperCAmelCase ) ) for each in produce_out]
return np.asarray(__UpperCAmelCase )
def lowercase__ (self : Optional[Any], __UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asmatrix(__UpperCAmelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.convolute(
__UpperCAmelCase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
SCREAMING_SNAKE_CASE : List[Any] = self.pooling(__UpperCAmelCase, self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
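    # Standalone illustration of the average-pooling step implemented above:
    # a 4x4 feature map pooled with a 2x2 window becomes a 2x2 map of block
    # means (the printed values are exact for np.arange(16)).
    fmap = np.arange(16, dtype=float).reshape(4, 4)
    size_pooling = 2
    pooled = [
        np.average(fmap[i : i + size_pooling, j : j + size_pooling])
        for i in range(0, 4, size_pooling)
        for j in range(0, 4, size_pooling)
    ]
    print(np.asmatrix(pooled).reshape(2, 2))  # [[ 2.5  4.5] [10.5 12.5]]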
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ) -> Optional[Any]:
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : Optional[str] , lowercase_ : Optional[str] = None ) -> Union[str, Any]:
_lowerCamelCase = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
_lowerCamelCase = to_pil_image(lowercase_ )
_lowerCamelCase , _lowerCamelCase = pil_image.size
_lowerCamelCase = pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type='''dict''' , config=lowercase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_lowerCamelCase = [idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
_lowerCamelCase = [word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
_lowerCamelCase = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
_lowerCamelCase = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
_lowerCamelCase = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
_lowerCamelCase = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_lowerCamelCase = []
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
_lowerCamelCase = [x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
_lowerCamelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
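# Worked example of the normalisation above: a word box given as
# (left, top, right, bottom) = (15, 30, 45, 60) on a 300x600 pixel page is
# mapped onto the 0-1000 scale expected by LayoutLM-style models:
#   [1000 * 15 / 300, 1000 * 30 / 600, 1000 * 45 / 300, 1000 * 60 / 600]
#   == [50, 50, 150, 100]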
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Dict = ['pixel_values']
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = "" , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_lowerCamelCase = get_size_dict(lowerCamelCase__ )
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = resample
_lowerCamelCase = apply_ocr
_lowerCamelCase = ocr_lang
_lowerCamelCase = tesseract_config
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowerCamelCase = (size['''height'''], size['''width'''])
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ):
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(lowerCamelCase__ )
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowerCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowerCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowerCamelCase = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_lowerCamelCase = []
_lowerCamelCase = []
for image in images:
_lowerCamelCase , _lowerCamelCase = apply_tesseract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
words_batch.append(lowerCamelCase__ )
boxes_batch.append(lowerCamelCase__ )
if do_resize:
_lowerCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowerCamelCase = [flip_channel_order(lowerCamelCase__ ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
_lowerCamelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowerCamelCase__ )
if apply_ocr:
_lowerCamelCase = words_batch
_lowerCamelCase = boxes_batch
return data
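# Minimal usage sketch (upstream this processor is transformers'
# LayoutLMv2ImageProcessor, so that public name is used here to keep the
# example self-contained). OCR needs the tesseract binary, so apply_ocr=False
# keeps the sketch dependency-free; the blank page is a stand-in for a scan.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv2ImageProcessor

    processor = LayoutLMv2ImageProcessor(apply_ocr=False)
    page = Image.new("RGB", (1_000, 1_500), color="white")
    encoding = processor(images=page, return_tensors="np")
    print(encoding["pixel_values"].shape)  # (1, 3, 224, 224): resized, channel-first, BGR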
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__SCREAMING_SNAKE_CASE : List[Any] = True
except (ImportError, AttributeError):
__SCREAMING_SNAKE_CASE : List[Any] = object
def lowerCAmelCase_( *lowercase_ : Dict , **lowercase_ : str ) -> str:
pass
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Any = logging.get_logger('''transformers-cli/serving''')
def lowerCAmelCase_( lowercase_ : Namespace ) -> List[Any]:
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowercase_ , args.host , args.port , args.workers )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : dict
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str]
lowercase__ : Optional[List[int]]
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Any
class lowerCamelCase_( A__ ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=lowerCamelCase__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=lowerCamelCase__ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=lowerCamelCase__ , default=8_8_8_8 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=lowerCamelCase__ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=lowerCamelCase__ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=lowerCamelCase__ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=lowerCamelCase__ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=lowerCamelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = pipeline
_lowerCamelCase = host
_lowerCamelCase = port
_lowerCamelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
_lowerCamelCase = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
] , timeout=6_0_0 , )
def snake_case__ ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def snake_case__ ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
try:
_lowerCamelCase = self._pipeline.tokenizer.tokenize(lowerCamelCase__ )
if return_ids:
_lowerCamelCase = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
return ServeTokenizeResult(tokens=lowerCamelCase__ , tokens_ids=lowerCamelCase__ )
else:
return ServeTokenizeResult(tokens=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , ):
try:
_lowerCamelCase = self._pipeline.tokenizer.decode(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return ServeDeTokenizeResult(model='''''' , text=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
async def snake_case__ ( self , lowerCamelCase__=Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
        # Check that we don't have an empty string
if len(lowerCamelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_lowerCamelCase = self._pipeline(lowerCamelCase__ )
return ServeForwardResult(output=lowerCamelCase__ )
except Exception as e:
raise HTTPException(5_0_0 , {'''error''': str(lowerCamelCase__ )} )
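# Typical invocation of the command registered above (assumes transformers
# was installed with the serving extras: pip install "transformers[serving]"):
#
#   transformers-cli serve --task text-classification --port 8888 --workers 1
#
# after which the /tokenize, /detokenize and /forward routes accept POST
# requests. The JSON field names below follow the upstream serving command
# (an assumption here, since the parameter names above are obfuscated):
#
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'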
from ..utils import DummyObject, requires_backends
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :str = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> int:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : str , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :str = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Any , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : int ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : Any , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
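# All of the placeholders above follow one pattern: DummyObject is a metaclass
# whose classes raise a helpful ImportError via requires_backends as soon as
# they are instantiated, instead of failing with a bare NameError at import
# time when torch/transformers/onnx are missing. A minimal sketch of the idea
# (upstream the backend list lives in a `_backends` class attribute, which is
# obfuscated to `a` above):
#
#     class _Dummy(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
#     _Dummy()  # raises ImportError naming torch if torch is not installed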
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> None:
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use VideoMAEImageProcessor instead.''' , FutureWarning , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: prints a maximum-size set of mutually
    non-overlapping activities, assuming they are sorted by finish time.

    >>> print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Map each reachable vertex to its parent in the breadth first tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """
        Return the path from the source to ``target_vertex`` along the breadth
        first tree, e.g. 'G->C->A->B->D' for target 'D' and source 'G'.
        Raises ValueError if the target is unreachable.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__( __lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ : Any = ShapEImgaImgPipeline
UpperCAmelCase_ : Any = ["""image"""]
UpperCAmelCase_ : Union[str, Any] = ["""image"""]
UpperCAmelCase_ : Optional[int] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase_ : Tuple = False
@property
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
return 3_2
@property
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
return 3_2
@property
def __lowerCAmelCase( self : Any ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowerCAmelCase( self : int ):
'''simple docstring'''
return 8
@property
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
snake_case__ = CLIPVisionModel(__UpperCamelCase )
return model
@property
def __lowerCAmelCase( self : int ):
'''simple docstring'''
snake_case__ = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__UpperCamelCase , do_normalize=__UpperCamelCase , do_resize=__UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
return image_processor
@property
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 1_6,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 3_2,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
snake_case__ = PriorTransformer(**__UpperCamelCase )
return model
@property
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ = {
"""param_shapes""": (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 1_2,
"""background""": (
0.1,
0.1,
0.1,
),
}
snake_case__ = ShapERenderer(**__UpperCamelCase )
return model
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
snake_case__ = self.dummy_prior
snake_case__ = self.dummy_image_encoder
snake_case__ = self.dummy_image_processor
snake_case__ = self.dummy_renderer
snake_case__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_0_2_4 , prediction_type="""sample""" , use_karras_sigmas=__UpperCamelCase , clip_sample=__UpperCamelCase , clip_sample_range=1.0 , )
snake_case__ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __lowerCAmelCase( self : str , __UpperCamelCase : Any , __UpperCamelCase : int=0 ):
'''simple docstring'''
snake_case__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith("""mps""" ):
snake_case__ = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 3_2,
"""output_type""": """np""",
}
return inputs
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
snake_case__ = """cpu"""
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**__UpperCamelCase )
snake_case__ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
snake_case__ = output.images[0]
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
snake_case__ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase( self : Tuple ):
'''simple docstring'''
snake_case__ = torch_device == """cpu"""
snake_case__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , )
def __lowerCAmelCase( self : List[Any] ):
'''simple docstring'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**__UpperCamelCase )
snake_case__ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ = 1
snake_case__ = 2
snake_case__ = self.get_dummy_inputs(__UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
snake_case__ = batch_size * [inputs[key]]
snake_case__ = pipe(**__UpperCamelCase , num_images_per_prompt=__UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__( unittest.TestCase ):
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="""np""" , ).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images , expected_image )
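
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original test file): the
# `get_dummy_inputs` helper above seeds the RNG differently on "mps" because
# device-bound generators are not supported there in older PyTorch releases.
# A minimal standalone version of that pattern (the helper name
# `make_generator` is ours):
import torch

def make_generator(device, seed=0):
    """Return a seeded torch.Generator, falling back to the global RNG on mps."""
    if str(device).startswith("mps"):
        # torch.manual_seed seeds the default RNG and returns its Generator
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
# ---------------------------------------------------------------------------
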
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase( self : Optional[int] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
        input_text = """this is a test"""
        output_text = """this is a test"""
return input_text, output_text
def __lowerCAmelCase( self : int ):
'''simple docstring'''
        token = """<pad>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __lowerCAmelCase( self : Tuple ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """[PAD]""" )
        self.assertEqual(len(vocab_keys ) , 3_0_0_0_1 )
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
        sequence = """ \tHeLLo!how \n Are yoU? """
        tokens_target = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __lowerCAmelCase( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
pass
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
        sequence = """I was born in 92000, and this is falsé."""
        tokens_target = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def __lowerCAmelCase( self : List[Any] ):
'''simple docstring'''
        sequence = """I was born in 92000, and this is falsé."""
        tokens_target = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def __lowerCAmelCase( self : Dict ):
'''simple docstring'''
        sequence = """I was born in 92000, and this is falsé."""
        tokens_target = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def __lowerCAmelCase( self : int ):
'''simple docstring'''
        sequence = """I was born in 92000, and this is falsé."""
        tokens_target = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def __lowerCAmelCase( self : Dict ):
'''simple docstring'''
        sequence = """ \tHeLLo!how \n Are yoU? """
        tokens_target = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
        sequence = """This is a test"""
        ids_target = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
        tokens_target = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
        back_tokens_target = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , keep_accents=True )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
        # fmt: off
        sequence = """I was born in 92000, and this is falsé."""
        ids_target = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
        tokens_target = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
        back_tokens_target = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
        # fmt: on
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
def __lowerCAmelCase( self : Tuple ):
'''simple docstring'''
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("""sequence builders""" )
        text_a = tokenizer.encode("""multi-sequence build""" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def __lowerCAmelCase( self : Dict ):
'''simple docstring'''
snake_case__ = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
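
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original test file): the tests
# above repeatedly compare the sentencepiece-backed slow tokenizer against the
# Rust-backed fast one. A hedged, minimal version of that idiom using the
# public classes (requires network access to the hub):
def compare_slow_and_fast_deberta():
    from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

    slow = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
    text = "I was born in 92000, and this is falsé."
    assert slow.encode(text) == fast.encode(text)
    return slow.tokenize(text)
# ---------------------------------------------------------------------------
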
import doctest
from collections import deque
import numpy as np
class __snake_case :
    def __init__( self : List[str] ) -> None:
        '''simple docstring'''
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def SCREAMING_SNAKE_CASE ( self : str ) -> list:
        '''simple docstring'''
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
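
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original file): circular
# convolution can be cross-checked with the DFT identity
# circconv(x, h) = IFFT(FFT(x) * FFT(h)); the helper name is ours.
import numpy as np

def circular_convolution_fft(x, h):
    n = max(len(x), len(h))
    x = np.pad(x, (0, n - len(x)))  # zero-pad both signals to a common length
    h = np.pad(h, (0, n - len(h)))
    return np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))

if __name__ == "__main__":
    # same signals as the class above; expected result is [10. 10.  6. 14.]
    print(np.round(circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4]), 2))
# ---------------------------------------------------------------------------
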
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data( subreddit , limit = 1 , age = "new" , wanted_data = None ) -> dict:
    '''simple docstring'''
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg )
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
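
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original script): Reddit answers
# HTTP 429 when rate-limited, which the scraper above surfaces as an
# exception. A hedged retry wrapper with exponential backoff (the name
# `get_with_backoff` is ours):
import time
import requests

def get_with_backoff(url, retries=3):
    delay = 1.0
    response = None
    for _ in range(retries):
        response = requests.get(url, headers={"User-agent": "A random string"})
        if response.status_code != 429:
            break
        time.sleep(delay)  # back off before the next attempt
        delay *= 2
    return response
# ---------------------------------------------------------------------------
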
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F"https://google.com{link.get('href')}")
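
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original script): joining
# sys.argv with spaces leaves characters such as '&' and '+' unescaped in the
# query string. A safer builder using only the standard library (the helper
# name is ours):
from urllib.parse import quote_plus

def build_search_url(query):
    # quote_plus percent-encodes reserved characters and maps spaces to '+'
    return "https://www.google.com/search?q=" + quote_plus(query)

# build_search_url("C++ & Rust") == "https://www.google.com/search?q=C%2B%2B+%26+Rust"
# ---------------------------------------------------------------------------
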
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 25),)
    def get_scheduler_config ( self, **lowerCamelCase) -> Union[str, Any]:
        """simple docstring"""
        config = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf'),
'variance_type': None,
}
config.update(**lowerCamelCase)
return config
    def check_over_configs ( self, time_step=0, **config) -> Any:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
    def check_over_forward ( self, time_step=0, **forward_kwargs) -> str:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop ( self, scheduler=None, **config) -> Optional[Any]:
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_5_7_4) < 1E-3
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='dpmsolver++', solver_order=order, solver_type=solver_type, )
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
def UpperCamelCase ( self) -> str:
"""simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('inf'))
self.check_over_configs(lambda_min_clipped=-5.1)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
        self.check_over_configs(variance_type=None)
self.check_over_configs(variance_type='learned_range')
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2_2_4_8) < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1_4_5_3) < 1E-3
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
        sample = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.0_6_4_9) < 1E-3
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
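
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original test file): the
# from_config round-trips exercised above work because every diffusers
# scheduler shares the same ConfigMixin machinery. A minimal sketch of
# swapping scheduler families while reusing one config:
from diffusers import DPMSolverSinglestepScheduler, UniPCMultistepScheduler

def swap_scheduler_family():
    single = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    # a different scheduler class can be instantiated from the same config
    return UniPCMultistepScheduler.from_config(single.config)
# ---------------------------------------------------------------------------
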
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """    def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
        shutil.copy(
            os.path.join(git_repo_path , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = "src/transformers"
shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ) -> Any:
        '''simple docstring'''
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , "new_code.py" )
        with open(fname , "w" , newline="\n" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , "r" ) as f:
                self.assertTrue(f.read() , expected )
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
        self.assertEqual(code , REFERENCE_CODE )
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub("Bert" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , REFERENCE_CODE , overwrite_result=re.sub("Bert" , "TestModel" , REFERENCE_CODE ) , )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme["format_model_list"] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme["format_model_list"] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , link_changed_md_list , localized_readme["format_model_list"] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
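
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original test file): the
# consistency checks above format candidate code strings in memory with
# black before comparing them. The core call, isolated:
import black

def format_code_string(code):
    mode = black.Mode(line_length=119)
    # black.format_str raises black.InvalidInput on unparsable code
    return black.format_str(code, mode=mode)
# ---------------------------------------------------------------------------
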
"""simple docstring"""
def solution ( n : int = 6_0_0_8_5_1_4_7_5_1_4_3 ):
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
print(f'''{solution() = }''')
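
# ---------------------------------------------------------------------------
# Editor's note (added checks, not from the original file): 13195 factors as
# 5 * 7 * 13 * 29, so its largest prime factor is 29 (the classic Project
# Euler example); a prime is its own largest prime factor.
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17
# ---------------------------------------------------------------------------
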
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _lowercase ( lowerCamelCase__ : int ):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _lowercase ( ):
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _lowercase ( ):
    mock_bucket = "mock-s3-bucket"
    dataset_path = F'''s3://{mock_bucket}'''
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith("s3://" ) is False
    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path )
    assert dataset_path == new_dataset_path
def _lowercase ( mockfs ):
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem("file" )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS )
def _lowercase ( compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path )
    assert isinstance(fs, compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex("." )]
    assert fs.glob("*" ) == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8" ) as f, open(text_file, encoding="utf-8" ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"] )
def _lowercase ( protocol, zip_jsonl_path, jsonl_gz_path ):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
    fs , *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def _lowercase ( hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file ):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(text_file ) as f:
        assert hffs.open("data/text_data.txt", "r" ).read() == f.read()
def _lowercase ( ):
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
    )
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def _lowercase ( tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8" )
    with zstd.open(path, "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def _lowercase ( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
    extracted_path = cached_path(input_path, download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch ):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename, download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def _lowercase ( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def _lowercase ( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def _lowercase ( tmpfs_file ):
    output_file = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def _lowercase ( ):
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def _lowercase ( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def _lowercase ( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def _lowercase ( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
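
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original test file): the offline
# tests above never touch the network -- they patch a module-level flag.
# `unittest.mock.patch` works both as a decorator and as a context manager:
from unittest.mock import patch
import datasets.config

def demo_offline_patch():
    before = bool(datasets.config.HF_DATASETS_OFFLINE)
    with patch("datasets.config.HF_DATASETS_OFFLINE", True):
        during = bool(datasets.config.HF_DATASETS_OFFLINE)  # True inside
    return before, during
# ---------------------------------------------------------------------------
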
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
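
# ---------------------------------------------------------------------------
# Editor's note (added sketch, not from the original file): a minimal version
# of the lazy-import idea behind `_LazyModule` above -- attributes resolve on
# first access, so torch-backed modeling files are only imported when used:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
# ---------------------------------------------------------------------------
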
def hamming ( n_element ):
    """simple docstring"""
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("""n_element should be a positive number""" )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(F'''The list with nth numbers is: {hamming_numbers}''')
    print("""-----------------------------------------------------""")
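
# ---------------------------------------------------------------------------
# Editor's note (added check, not from the original file): the first ten
# Hamming (5-smooth) numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
if __name__ == "__main__":
    assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
# ---------------------------------------------------------------------------
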
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION ='\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION ='\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
SCREAMING_SNAKE_CASE_: Any ='\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n'
_KWARGS_DESCRIPTION = SCREAMING_SNAKE_CASE_  # the docstring constant above is referenced below under this name
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
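# Minimal usage sketch (token lists are made up; assumes network access so
# `load_metric` can fetch the registered "google_bleu" script):
if __name__ == "__main__":
    import datasets
    hyp = [["the", "cat", "sat", "on", "the", "mat"]]
    refs = [[["the", "cat", "is", "on", "the", "mat"]]]
    metric = datasets.load_metric("google_bleu")
    print(metric.compute(predictions=hyp, references=refs)["google_bleu"])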
| 713
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
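    # Note on the id scheme behind the expected values above: each UTF-8 byte b maps
    # to id b + 6, since ids 0-5 are reserved for special tokens. For example,
    # ord("U") = 85 -> 91 (the first id after [CLS]), and the three bytes of "€"
    # (226, 130, 172) become 232, 136, 178.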
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])), )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass
    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass
    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass
    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 415
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_SCREAMING_SNAKE_CASE : Optional[int] = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1_024, 2_048, 4_096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
        img_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2_048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_6_0_6), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            dummy_text = 'Hello'
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
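    # The flattened-patch width checked throughout this file follows
    #   patch_height * patch_width * channels + 2
    # (e.g. 16 * 16 * 3 + 2 = 770); the extra two slots carry the row and column
    # coordinate of each patch, which is why every expected dim above ends in "+ 2".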
| 226
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = """pegasus"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(
        self,
        vocab_size=5_02_65,
        max_position_embeddings=10_24,
        encoder_layers=12,
        encoder_ffn_dim=40_96,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=40_96,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=10_24,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
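# Minimal usage sketch (values shown are just the defaults above):
#   config = PegasusConfig()
#   config.hidden_size            # -> 1024, routed to d_model via attribute_map
#   config.num_attention_heads    # -> 16, routed to encoder_attention_heads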
| 526
| 0
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others.")
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(F"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.")
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
A__: int = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A__: int = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
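# Typical invocations (the script path is illustrative; use wherever this file lives):
#   python utils/check_doc_toc.py                      # raise if the ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the ToC in place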
| 221
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" ,[None, 4_00 * 2**20, 6_00 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size" ,["default", 0, 1_00 * 2**20, 9_00 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
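# The rule exercised above, in one line (sketch): a dataset counts as "small"
# exactly when its byte size is truthy and strictly below IN_MEMORY_MAX_SIZE, e.g.
#   is_small_dataset(100 * 2**20)  # True iff IN_MEMORY_MAX_SIZE exceeds 100 MiB (and is non-zero)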
| 221
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str, base_model: bool) -> None:
    config = FunnelConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
UpperCamelCase__ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
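# Example invocation (script name and paths are illustrative):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin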
| 105
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
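# Worked example (character n-grams of size 3): create_ngram("hello", 3)
# slides a width-3 window over the string and yields ["hel", "ell", "llo"].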
if __name__ == "__main__":
from doctest import testmod
testmod()
| 320
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((6_4, 6_4))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((6_4, 6_4))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
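    # Design note on the bound above: attention slicing plus sequential CPU offload
    # moves submodules to the GPU one at a time, so peak CUDA allocation is expected
    # to stay under ~2.65 GB even though the full fp16 pipeline would not fit at once.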
| 331
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = mask_token
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
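# Sketch of the add_prefix_space contract enforced in _batch_encode_plus above
# (checkpoint name is the one referenced in this file; behavior read off the code, not run here):
#   tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)   # accepted
# The same call on a tokenizer built with add_prefix_space=False raises ValueError.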
| 331
| 1
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2_048
MAX_LENGTH = 4_096
SEED = 42
PROCESS_TRAIN = os.environ.pop('''PROCESS_TRAIN''', '''false''')
CATEGORY_MAPPING = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a
__snake_case = {"id": example["id"]}
__snake_case = example["annotations"]
__snake_case = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
__snake_case = ["yes"] if 1 in yes_no_answer else ["no"]
__snake_case = __snake_case = []
__snake_case = __snake_case = []
__snake_case = ["<cls>"]
else:
__snake_case = ["short"]
__snake_case = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
__snake_case = ["long"]
__snake_case = choose_first(annotation["long_answer"] , is_long_answer=_UpperCAmelCase )
__snake_case = []
answer.update(_UpperCAmelCase )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
__snake_case = True
else:
__snake_case = False
__snake_case = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , _UpperCAmelCase ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -1_00,  # ignore index in cross-entropy
                "end_token": -1_00,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
__snake_case = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__snake_case = tokenizer(example["question"]["text"] , out["context"] ).input_ids
__snake_case = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__snake_case = []
__snake_case = []
__snake_case = input_ids[:q_len]
__snake_case = range(_UpperCAmelCase , len(_UpperCAmelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__snake_case = i + max_length - q_len
__snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_00] * len(_UpperCAmelCase ),
"end_token": [-1_00] * len(_UpperCAmelCase ),
"category": category,
},
}
__snake_case = out["context"].split()
__snake_case = splitted_context[answer["end_token"]]
__snake_case = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=_UpperCAmelCase , ).input_ids )
__snake_case = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=_UpperCAmelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__snake_case = len(tokenizer(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__snake_case = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
__snake_case = answer["start_token"]
__snake_case = answer["end_token"]
if assertion:
__snake_case = tokenizer.decode(_UpperCAmelCase )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , _UpperCAmelCase , end="\n\n" )
if len(_UpperCAmelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__snake_case = input_ids[:q_len]
__snake_case = range(_UpperCAmelCase , len(_UpperCAmelCase ) , max_length - doc_stride )
__snake_case = []
__snake_case = []
__snake_case = []
__snake_case = [] # null, yes, no, long, short
for i in doc_start_indices:
__snake_case = i + max_length - q_len
__snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__snake_case = start_token - i + q_len
__snake_case = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
__snake_case = -1_00
__snake_case = -1_00
answers_category.append("null" )
__snake_case = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_UpperCAmelCase )
answers_end_token.append(_UpperCAmelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(_UpperCAmelCase ) )
print("Old:" , tokenizer.decode(_UpperCAmelCase ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion, )
    return example
def save_to_disk(hop, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hop, total=len(hop), desc="Saving samples ... "):
            labels = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset('''natural_questions''')
    tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
    data = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
    fn_kwargs = {
        '''tokenizer''': tokenizer,
        '''doc_stride''': DOC_STRIDE,
        '''max_length''': MAX_LENGTH,
        '''assertion''': False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
    print(data)
    np.random.seed(SEED)
    cache_file_name = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
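# Each line that save_to_disk writes is one training window, schematically
# (the field values here are made up):
#   {"input_ids": [...], "start_token": 17, "end_token": 24, "category": 1}
# where category is the integer code from CATEGORY_MAPPING above.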
| 69
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
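# Illustrative aside (not part of the original file): _LazyModule consumes the
# _import_structure mapping above and defers the heavy submodule imports until a
# symbol is first accessed. A minimal, hypothetical sketch of the same idea using
# PEP 562 module-level __getattr__ (the names below are made up for illustration):
#
#   import importlib
#
#   _import_structure = {"submodule_a": ["SymbolA"], "submodule_b": ["SymbolB"]}
#   _symbol_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}
#
#   def __getattr__(name):
#       if name in _symbol_to_module:
#           module = importlib.import_module("." + _symbol_to_module[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")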
| 380
| 0
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1)
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated GELU: one projection is passed through the activation and gates the other
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of the GELU
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
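# Quick shape check for the FiLM modulation above (illustrative only; the toy
# dimensions below are assumptions, not values used by the model): the conditioning
# embedding is projected to a per-feature scale and shift, and the input is
# modulated as x * (1 + scale) + shift, so the output shape matches the input.
if __name__ == "__main__":
    film = TaFiLMLayer(in_features=32, out_features=8)
    x = torch.randn(2, 5, 8)  # (batch, sequence, features)
    conditioning = torch.randn(2, 1, 32)  # (batch, 1, conditioning width)
    assert film(x, conditioning).shape == x.shape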
| 717
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
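# Illustrative sketch of the deprecation-shim pattern used above (the names below are
# hypothetical): the old class stays importable but forwards to its replacement and
# emits a FutureWarning on construction.
#
#   class OldProcessor(NewProcessor):
#       def __init__(self, *args, **kwargs):
#           warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
#           super().__init__(*args, **kwargs)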
| 663
| 0
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
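# Programmatic use (illustrative): the same conversion can be run without argparse, e.g.
#
#   convert_roberta_prelayernorm_checkpoint_to_pytorch(
#       "andreasmadsen/efficient_mlm_m0.40", "./converted-model"
#   )
#
# The repo id is the example from the --checkpoint-repo help string above; the output
# folder name is hypothetical.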
| 307
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
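# Worked example (illustrative): for [2, 7, 3, 5] the next greater element to the
# right of 2 is 7, of 7 is none (-1), of 3 is 5, and of 5 is none (-1). All three
# implementations above return the same answer:
#
#   >>> next_greatest_element([2, 7, 3, 5])
#   [7, -1, 5, -1]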
| 307
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size)
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
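# Illustrative note (not part of the original script): generate_neighbours() expects a
# whitespace-separated edge list with one undirected edge per line, e.g.
#
#   a b 20
#   a c 18
#   b c 10
#
# It builds an adjacency dict mapping each node to [neighbour, distance] pairs, e.g.
# {"a": [["b", "20"], ["c", "18"]], ...}. Note that generate_first_solution() reads only
# the first character of the file to pick the start node, so single-character node
# names are assumed by this script.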
| 475
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[Any] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 489
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # rescale pixels from [0, 255] to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
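# Minimal sketch of the color-quantization step above (illustrative; the clusters and
# pixels here are made up): each RGB pixel is mapped to the index of its nearest
# cluster centroid under squared euclidean distance.
if __name__ == "__main__":
    toy_clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)  # black, white
    toy_image = np.array([[[10, 10, 10], [250, 240, 245]]], dtype=np.float32)  # 1x2 RGB image
    assert color_quantize(toy_image, toy_clusters).tolist() == [0, 1]  # dark -> 0, bright -> 1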
| 489
| 1
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights))
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output))
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T)
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer))
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T)
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T)
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer))
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f'''Iteration {iteration} Loss: {loss}''')
    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ), dtype=numpy.float64)
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
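# Sanity check (illustrative): sigmoid_derivative() expects the *activation* sigmoid(x)
# as its argument, since sigma'(x) = sigma(x) * (1 - sigma(x)). A central finite
# difference confirms this at an arbitrary point:
if __name__ == "__main__":
    x, h = 0.3, 1e-6
    analytic = sigmoid_derivative(sigmoid(x))
    numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)
    assert abs(analytic - numeric) < 1e-6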
| 664
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
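# Illustrative note: this guarded-import pattern substitutes placeholder ("dummy")
# objects when the optional torch/transformers backends are missing, so the import
# itself never fails and a helpful error is raised only at use time. A minimal,
# hypothetical dummy looks like:
#
#   class ShapEPipeline:
#       def __init__(self, *args, **kwargs):
#           raise ImportError("ShapEPipeline requires `torch` and `transformers` to be installed.")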
| 664
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."})
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        })
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        })

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        })
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)})
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        })
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"})
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."})
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        })
    image_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        })
    patch_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        })
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."})
class MaskGenerator:
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , UpperCamelCase_ , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""")
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"""Overriding config: {model_args.config_overrides}""")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"""New config: {config}""")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ])
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio)
    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn)
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
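# Illustrative check of MaskGenerator above (toy numbers, matching its defaults): with
# input_size=192, mask_patch_size=32 and model_patch_size=4 there are (192 // 32) ** 2 = 36
# coarse mask patches, each covering (32 // 4) ** 2 model patches, so the flattened mask
# has (192 // 4) ** 2 = 2304 entries with roughly mask_ratio of them set to 1:
#
#   mask_generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#   assert mask_generator().shape == (2304,)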
| 48
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = _import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 374
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("""google/canine-s""")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration(self) -> None:
"""simple docstring"""
__lowerCAmelCase : str = self.canine_tokenizer
__lowerCAmelCase : Union[str, Any] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__lowerCAmelCase : str = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__lowerCAmelCase : List[Any] = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
    def test_encoding_keys(self) -> None:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.canine_tokenizer
__lowerCAmelCase : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__lowerCAmelCase : Union[str, Any] = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowerCAmelCase )
self.assertIn("""attention_mask""" , lowerCAmelCase )
self.assertIn("""token_type_ids""" , lowerCAmelCase )
@require_torch
    def test_max_length_integration(self) -> None:
"""simple docstring"""
__lowerCAmelCase : int = self.canine_tokenizer
__lowerCAmelCase : Optional[int] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__lowerCAmelCase : List[str] = tokenizer(
text_target=lowerCAmelCase , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    def test_save_and_load_tokenizer(self) -> None:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase : List[str] = tempfile.mkdtemp()
__lowerCAmelCase : List[str] = """ He is very happy, UNwant\u00E9d,running"""
__lowerCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : List[Any] = tokenizer.__class__.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
shutil.rmtree(lowerCAmelCase )
__lowerCAmelCase : List[str] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
__lowerCAmelCase : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
__lowerCAmelCase : int = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__lowerCAmelCase : Dict = chr(0Xe007 )
additional_special_tokens.append(lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__lowerCAmelCase : Tuple = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : Any = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertIn(lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCAmelCase : Union[str, Any] = tokenizer.__class__.from_pretrained(lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase )
    def test_add_special_tokens(self) -> None:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : List[Any] = self.get_clean_sequence(lowerCAmelCase )
# a special token for Canine can be defined as follows:
__lowerCAmelCase : str = 0Xe005
__lowerCAmelCase : Optional[int] = chr(lowerCAmelCase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__lowerCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ) , 1 )
__lowerCAmelCase : str = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase )
__lowerCAmelCase : List[Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : int = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , input_encoded + special_token_id )
__lowerCAmelCase : str = tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
self.assertTrue(special_token not in decoded )
    def test_tokenize_special_tokens(self) -> None:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : List[str] = chr(0Xe005 )
__lowerCAmelCase : Optional[int] = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__lowerCAmelCase : Tuple = tokenizer.tokenize(lowerCAmelCase )
__lowerCAmelCase : Any = tokenizer.tokenize(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ) , 1 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , lowerCAmelCase )
self.assertEqual(token_a[0] , lowerCAmelCase )
@require_tokenizers
    def test_added_token_serializable(self) -> None:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
                NEW_TOKEN = 0Xe006
                new_token = chr(NEW_TOKEN )
                new_token = AddedToken(new_token , lstrip=True )
                tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name )
                    tokenizer.from_pretrained(tmp_dir_name )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    tokenizer_config = json.load(json_file )
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0Xe006
                new_token_a = chr(NEW_TOKEN )
                special_tokens_map["""additional_special_tokens"""] = [new_token_a]
                tokenizer_config["""additional_special_tokens"""] = [new_token_a]
                with open(os.path.join(tmp_dir , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(tokenizer_config , outfile )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir , extra_ids=0 )
                self.assertIn(new_token_a , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
                NEW_TOKEN = 0Xe007
                new_token_a = chr(NEW_TOKEN )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_a , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , extra_ids=0 )
                self.assertIn(new_token_a , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input = """hello world"""
                if self.space_between_special_tokens:
                    output = """[CLS] hello world [SEP]"""
                else:
                    output = input
                encoded = tokenizer.encode(input , add_special_tokens=False )
                decoded = tokenizer.decode(encoded , spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(decoded , [output, output.lower()] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : int = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
                token = """a"""
                token_id = ord(token )
                for attr in attributes_list:
                    setattr(tokenizer , attr + """_id""" , None )
                    self.assertEqual(getattr(tokenizer , attr ) , None )
                    self.assertEqual(getattr(tokenizer , attr + """_id""" ) , None )
                    setattr(tokenizer , attr + """_id""" , token_id )
                    self.assertEqual(getattr(tokenizer , attr ) , token )
                    self.assertEqual(getattr(tokenizer , attr + """_id""" ) , token_id )
                setattr(tokenizer , """additional_special_tokens_ids""" , [] )
                self.assertListEqual(getattr(tokenizer , """additional_special_tokens""" ) , [] )
                self.assertListEqual(getattr(tokenizer , """additional_special_tokens_ids""" ) , [] )
                additional_special_token_id = 0Xe006
                additional_special_token = chr(additional_special_token_id )
                setattr(tokenizer , """additional_special_tokens_ids""" , [additional_special_token_id] )
                self.assertListEqual(getattr(tokenizer , """additional_special_tokens""" ) , [additional_special_token] )
                self.assertListEqual(getattr(tokenizer , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
"""simple docstring"""
pass
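# Hedged aside (not part of the test suite above): CANINE has no fixed
# vocabulary, so a "special token" is just a Unicode codepoint, typically
# drawn from the private-use area; its token id equals the codepoint itself.
# >>> ord(chr(0xE005))
# 57349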
| 710
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time (__A : int ) -> str:
    t = int(__A )
    h , m , s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
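# Hedged usage sketch for format_time: the hour component is dropped when it
# is zero and minutes/seconds are zero-padded (illustrative values):
# >>> format_time(3725)
# '1:02:05'
# >>> format_time(125)
# '02:05'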
def html_progress_bar (value , total , prefix , label , width=3_0_0 ) -> str:
# docstyle-ignore
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table (items ) -> str:
    html_code = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
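# Hedged usage sketch for text_to_html_table: the first row of `items` becomes
# the header, the remaining rows the body, and floats are rendered with six
# decimal places (the elided output below is illustrative):
# >>> text_to_html_table([["Step", "Loss"], [1, 0.5]])
# '<table border="1" class="dataframe">\n  <thead>\n ...'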
class NotebookProgressBar :
    """simple docstring"""
    warmup : int =5
    update_every : float =0.2
    def __init__( self , total : int , prefix : Optional[str] = None , leave : bool = True , parent : Optional["NotebookTrainingTracker"] = None , width : int = 3_00 , ) -> None:
        """simple docstring"""
        self.total = total
        self.prefix = """""" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update (self , value : int , force_update : bool = False , comment : str = None ) -> None:
        """simple docstring"""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar (self , value , comment=None ) -> None:
        """simple docstring"""
        spaced_value = """ """ * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = f'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
        else:
            self.label = (
                f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
                f''' {format_time(self.predicted_remaining )}'''
            )
            self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
        self.display()
    def display (self ) -> None:
        """simple docstring"""
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close (self ) -> None:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("""""" ) )
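# Hedged usage sketch for NotebookProgressBar (left commented out because the
# HTML widget only renders inside a Jupyter/IPython frontend; values are
# illustrative, not from the original module):
# bar = NotebookProgressBar(100, prefix="Train")
# for step in range(100):
#     bar.update(step + 1, comment=f"loss={1.0 / (step + 1):.3f}")
# bar.close()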
class NotebookTrainingTracker ( NotebookProgressBar ):
    """simple docstring"""
    def __init__( self , num_steps , column_names=None ) -> None:
        """simple docstring"""
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display (self ) -> None:
        """simple docstring"""
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line (self , values ) -> None:
        """simple docstring"""
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child (self , total , prefix=None , width=3_00 ) -> "NotebookProgressBar":
        """simple docstring"""
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
return self.child_bar
    def remove_child (self ) -> None:
        """simple docstring"""
        self.child_bar = None
self.display()
class NotebookProgressCallback ( TrainerCallback ):
    """simple docstring"""
    def __init__( self ) -> None:
        """simple docstring"""
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin (self , args , state , control , **kwargs ) -> None:
        """simple docstring"""
        self.first_column = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["""Training Loss"""]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("""Validation Loss""" )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end (self , args , state , control , **kwargs ) -> None:
        """simple docstring"""
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step (self , args , state , control , eval_dataloader=None , **kwargs ) -> None:
        """simple docstring"""
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict (self , args , state , control , **kwargs ) -> None:
        """simple docstring"""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log (self , args , state , control , logs=None , **kwargs ) -> None:
        """simple docstring"""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"""Training Loss""": logs["""loss"""]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["""Step"""] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate (self , args , state , control , metrics=None , **kwargs ) -> None:
        """simple docstring"""
        if self.training_tracker is not None:
            values = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values["""Training Loss"""] = log["""loss"""]
                    break
            if self.first_column == "Epoch":
                values["""Epoch"""] = int(state.epoch )
            else:
                values["""Step"""] = state.global_step
            metric_key_prefix = """eval"""
            for k in metrics:
                if k.endswith("""_loss""" ):
                    metric_key_prefix = re.sub(r"""\_loss$""" , """""" , k )
            _ = metrics.pop("""total_flos""" , None )
            _ = metrics.pop("""epoch""" , None )
            _ = metrics.pop(f'''{metric_key_prefix}_runtime''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , None )
            for k, v in metrics.items():
                if k == f'''{metric_key_prefix}_loss''':
                    values["""Validation Loss"""] = v
                else:
                    splits = k.split("""_""" )
                    name = """ """.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end (self , args , state , control , **kwargs ) -> None:
        """simple docstring"""
        self.training_tracker.update(
            state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=True )
        self.training_tracker = None
| 218
| 0
|
def join ( separator , separated ) -> str:
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
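# Hedged usage sketch for the join() helper above (illustrative values):
# >>> join("-", ["a", "b", "c"])
# 'a-b-c'
# The trailing separator is removed by the final .strip(separator) call.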
| 326
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule ( ) -> Union[str, Any]:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = """__test_patch_submodule_mock__"""
    with patch_submodule(_test_patching , """os.path.join""" , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin ( ) -> Tuple:
    '''simple docstring'''
    assert _test_patching.open is open
    mock = """__test_patch_submodule_builtin_mock__"""
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , """open""" , mock ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing ( ) -> Any:
    '''simple docstring'''
    mock = """__test_patch_submodule_missing_mock__"""
    with patch_submodule(_test_patching , """pandas.read_csv""" , mock ):
pass
def test_patch_submodule_missing_builtin ( ) -> Dict:
    '''simple docstring'''
    mock = """__test_patch_submodule_missing_builtin_mock__"""
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , """len""" , None ) is None
    with patch_submodule(_test_patching , """len""" , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop ( ) -> Tuple:
    '''simple docstring'''
    mock = """__test_patch_submodule_start_and_stop_mock__"""
    patch = patch_submodule(_test_patching , """open""" , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive ( ) -> Union[str, Any]:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = """__test_patch_submodule_successive_join__"""
    mock_dirname = """__test_patch_submodule_successive_dirname__"""
    mock_rename = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
        with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
        with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist ( ) -> str:
    '''simple docstring'''
    mock = """__test_patch_submodule_doesnt_exist_mock__"""
    with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , mock ):
        pass
    with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , mock ):
pass
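# For intuition, a hedged minimal sketch of what patch_submodule-style patching
# does (this is NOT the datasets implementation, just the core idea): swap an
# attribute on a module object for the duration of a context, then restore it.
from contextlib import contextmanager

@contextmanager
def _toy_patch(module, attr_name, new_value):
    # save the original attribute, install the replacement, and always restore
    old_value = getattr(module, attr_name)
    setattr(module, attr_name, new_value)
    try:
        yield
    finally:
        setattr(module, attr_name, old_value)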
| 109
| 0
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type ="esm"
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict ( self ):
        """simple docstring"""
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig :
    """simple docstring"""
    esm_type : str =None
    fp16_esm : bool =True
    use_esm_attn_map : bool =False
    esm_ablate_pairwise : bool =False
    esm_ablate_sequence : bool =False
    esm_input_dropout : float =0
    embed_aa : bool =True
    bypass_lm : bool =False
    lddt_head_hid_dim : int =1_28
    trunk : "TrunkConfig" =None
    def __post_init__ ( self ):
        """simple docstring"""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict ( self ):
        """simple docstring"""
        output = asdict(self )
        output["""trunk"""] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig :
    """simple docstring"""
    num_blocks : int =48
    sequence_state_dim : int =10_24
    pairwise_state_dim : int =1_28
    sequence_head_width : int =32
    pairwise_head_width : int =32
    position_bins : int =32
    dropout : float =0
    layer_drop : float =0
    cpu_grad_checkpoint : bool =False
    max_recycles : int =4
    chunk_size : Optional[int] =1_28
    structure_module : "StructureModuleConfig" =None
    def __post_init__ ( self ):
        """simple docstring"""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        # The original self-modulo checks (x % x != 0) were no-ops; the intent,
        # given the head-count computation below, is divisibility by head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict ( self ):
        """simple docstring"""
        output = asdict(self )
        output["""structure_module"""] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig :
    """simple docstring"""
    sequence_dim : int =3_84
    pairwise_dim : int =1_28
    ipa_dim : int =16
    resnet_dim : int =1_28
    num_heads_ipa : int =12
    num_qk_points : int =4
    num_v_points : int =8
    dropout_rate : float =0.1
    num_blocks : int =8
    num_transition_layers : int =1
    num_resnet_blocks : int =2
    num_angles : int =7
    trans_scale_factor : int =10
    epsilon : float =1E-8
    inf : float =1E5
    def to_dict ( self ):
        """simple docstring"""
        return asdict(self )
def get_default_vocab_list ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
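if __name__ == "__main__":
    # Hedged sanity check (not part of the original module): the default
    # TrunkConfig satisfies the head-width divisibility rules enforced in
    # __post_init__ above.
    trunk = TrunkConfig()
    assert trunk.sequence_state_dim // trunk.sequence_head_width == 32  # 1024 / 32
    assert trunk.pairwise_state_dim // trunk.pairwise_head_width == 4   # 128 / 32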
| 715
|
"""simple docstring"""
def pancake_sort ( arr ):
    '''simple docstring'''
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
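# Hedged walk-through (illustrative): each pass first flips the prefix that
# brings the current maximum to the front, then flips the active prefix to
# sink that maximum into place.
# >>> pancake_sort([3, 1, 2])
# [1, 2, 3]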
| 681
| 0
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a =logging.getLogger(__name__)
class BertEncoderWithPabee ( BertEncoder ):
    def adaptive_forward ( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , __lowerCAmelCase , )
class BertModelWithPabee ( BertModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.encoder = BertEncoderWithPabee(config )
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold ( self , threshold ):
        self.regression_threshold = threshold
    def set_patience ( self , patience ):
        self.patience = patience
    def reset_stats ( self ):
        self.inference_instances_num = 0
        self.inference_layers_num = 0
    def log_stats ( self ):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(message )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward ( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_dropout=None , output_layers=None , regression=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
lowerCamelCase__ =input_ids.size()
elif inputs_embeds is not None:
lowerCamelCase__ =inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
lowerCamelCase__ =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCamelCase__ =torch.ones(_lowerCamelCase , device=_lowerCamelCase )
if token_type_ids is None:
lowerCamelCase__ =torch.zeros(_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCamelCase__ =self.get_extended_attention_mask(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =encoder_hidden_states.size()
lowerCamelCase__ =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCamelCase__ =torch.ones(_lowerCamelCase , device=_lowerCamelCase )
lowerCamelCase__ =self.invert_attention_mask(_lowerCamelCase )
else:
lowerCamelCase__ =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCamelCase__ =self.get_head_mask(_lowerCamelCase , self.config.num_hidden_layers )
lowerCamelCase__ =self.embeddings(
input_ids=_lowerCamelCase , position_ids=_lowerCamelCase , token_type_ids=_lowerCamelCase , inputs_embeds=_lowerCamelCase )
lowerCamelCase__ =embedding_output
if self.training:
lowerCamelCase__ =[]
for i in range(self.config.num_hidden_layers ):
lowerCamelCase__ =self.encoder.adaptive_forward(
_lowerCamelCase , current_layer=_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase )
lowerCamelCase__ =self.pooler(_lowerCamelCase )
lowerCamelCase__ =output_layers[i](output_dropout(_lowerCamelCase ) )
res.append(_lowerCamelCase )
elif self.patience == 0: # Use all layers for inference
lowerCamelCase__ =self.encoder(
_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
lowerCamelCase__ =self.pooler(encoder_outputs[0] )
lowerCamelCase__ =[output_layers[self.config.num_hidden_layers - 1](_lowerCamelCase )]
else:
lowerCamelCase__ =0
lowerCamelCase__ =None
lowerCamelCase__ =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCamelCase__ =self.encoder.adaptive_forward(
_lowerCamelCase , current_layer=_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase )
lowerCamelCase__ =self.pooler(_lowerCamelCase )
lowerCamelCase__ =output_layers[i](_lowerCamelCase )
if regression:
lowerCamelCase__ =logits.detach()
if patient_result is not None:
lowerCamelCase__ =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCamelCase__ =0
else:
lowerCamelCase__ =logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCamelCase__ =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_lowerCamelCase ) ):
patient_counter += 1
else:
lowerCamelCase__ =0
lowerCamelCase__ =logits
if patient_counter == self.patience:
break
lowerCamelCase__ =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , __lowerCAmelCase , )
class BertForSequenceClassificationWithPabee ( BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward ( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , ):
        logits = self.bert(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits ):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
| 530
|
"""simple docstring"""
import random
def random_graph ( vertices_number , probability , directed = False ) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add the reverse edge from j to i as well
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number ) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
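if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not from the original module):
    # probability >= 1 degenerates to a complete graph, so every node
    # lists every other node and the result is deterministic.
    assert random_graph(3, 1) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}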
| 530
| 1
|
def greatest_common_divisor (x , y ):
    return x if y == 0 else greatest_common_divisor(y , x % y)
def lcm (x , y ):
    return (x * y) // greatest_common_divisor(x , y)
def solution (n = 20):
    g = 1
    for i in range(1 , n + 1):
        g = lcm(g , i)
    return g
if __name__ == "__main__":
print(f'{solution() = }')
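# Hedged note for context (well-known Project Euler 5 result): the smallest
# number evenly divisible by every integer from 1 to 20 is 232792560.
# >>> solution(20)
# 232792560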
| 618
|
from typing import Any
def viterbi (observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1 , -1 , -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation (observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space)
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities)
def _validate_not_empty (observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError('There\'s an empty parameter')
def _validate_lists (observations_space , states_space):
    _validate_list(observations_space , 'observations_space')
    _validate_list(states_space , 'states_space')
def _validate_list (_object , var_name):
    if not isinstance(_object , list):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x , str):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg)
def _validate_dicts (initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_dict(initial_probabilities , 'initial_probabilities' , float)
    _validate_nested_dict(transition_probabilities , 'transition_probabilities')
    _validate_nested_dict(emission_probabilities , 'emission_probabilities')
def _validate_nested_dict (_object , var_name):
    _validate_dict(_object , var_name , dict)
    for x in _object.values():
        _validate_dict(x , var_name , float , True)
def _validate_dict (_object , var_name , value_type , nested = False):
    if not isinstance(_object , dict):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg)
    if not all(isinstance(x , str) for x in _object):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg)
    if not all(isinstance(x , value_type) for x in _object.values()):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
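if __name__ == "__main__":
    # Hedged usage sketch with the classic two-state health/symptom HMM
    # (illustrative numbers, not from the original module):
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    start_p = {"healthy": 0.6, "sick": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    assert viterbi(observations, states, start_p, trans_p, emit_p) == [
        "healthy", "healthy", "sick"
    ]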
| 618
| 1
|
def _print_dist ( dist , v ):
    """simple docstring"""
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()
def floyd_warshall ( graph , v ):
    """simple docstring"""
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
__A : Dict = int(input("Enter number of vertices: "))
__A : Union[str, Any] = int(input("Enter number of edges: "))
__A : List[str] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__A : List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__A : Union[str, Any] = int(input("Enter source:"))
__A : List[str] = int(input("Enter destination:"))
__A : Union[str, Any] = float(input("Enter weight:"))
__A : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
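# Hedged usage sketch mirroring the example input above (bypasses the
# interactive prompts; values are the ones from the expected output):
# INF = float("inf")
# graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
# floyd_warshall(graph, 3)  # prints the shortest-path matrix shown above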
| 27
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 691
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_ : Tuple = 1_6
lowerCAmelCase_ : Union[str, Any] = 3_2
def get_dataloaders ( accelerator , batch_size = 16 , model_name = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
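# Hedged usage sketch (left commented out because it downloads GLUE/MRPC and
# a pretrained tokenizer; names as restored above):
# accelerator = Accelerator()
# train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)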
def training_function ( config , args ):
'''simple docstring'''
# Initialize accelerator
UpperCAmelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["""lr"""]
UpperCAmelCase = int(config["""num_epochs"""] )
UpperCAmelCase = int(config["""seed"""] )
UpperCAmelCase = int(config["""batch_size"""] )
UpperCAmelCase = args.model_name_or_path
set_seed(snake_case__ )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(snake_case__ , snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
UpperCAmelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCAmelCase = 1
UpperCAmelCase = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
UpperCAmelCase = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase = 0
# Now we train the model
UpperCAmelCase = evaluate.load("""glue""" , """mrpc""" )
UpperCAmelCase = 0
UpperCAmelCase = {}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
UpperCAmelCase = model(**snake_case__ )
UpperCAmelCase = outputs.loss
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCAmelCase = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**snake_case__ )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase , UpperCAmelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case__ ) - 1:
UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , snake_case__ )
UpperCAmelCase = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
UpperCAmelCase = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument("--num_epochs", type=int, default=3, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
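# Illustrative launch (not part of the original script; the file name below is a placeholder
# and an existing `accelerate config`, optionally with a DeepSpeed section, is assumed):
#
#   accelerate launch this_script.py --model_name_or_path bert-base-cased --num_epochs 3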
| 717
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
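# Minimal usage sketch (illustrative, not from the original file; `pil_image` is an assumed PIL image):
#
#   image_processor = PoolFormerImageProcessor(size={"shortest_edge": 224})
#   batch = image_processor(images=pil_image, return_tensors="pt")  # -> {"pixel_values": tensor}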
| 378
| 0
|
"""simple docstring"""
def selection_sort(collection):
    """Sort a mutable collection in ascending order using selection sort."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
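# Illustrative behaviour (not in the original file): the sort happens in place and the
# same list object is returned, e.g. selection_sort([5, 2, 4, 1]) -> [1, 2, 4, 5].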
| 238
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size,
            hop_length=self.sample_stride, fft_length=self.n_fft, center=False,
            preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
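# Usage sketch (illustrative, not from the original file; `audio` is an assumed 16 kHz mono waveform):
#
#   extractor = MCTCTFeatureExtractor()
#   inputs = extractor(audio, sampling_rate=16000, padding=True, return_tensors="np")
#   inputs["input_features"].shape  # (batch, num_frames, feature_size)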
| 238
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 588
|
import functools
def edit_distance(word1: str, word2: str) -> int:
    """Compute the Levenshtein edit distance between two words with memoized recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining characters of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining characters of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
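# Illustrative check (not part of the original file):
#   edit_distance("kitten", "sitting") == 3   # two substitutions plus one insertion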
| 588
| 1
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 309
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode,
            verification_mode=verification_mode, base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
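# Usage sketch (illustrative, not from the original file; `ds` is an assumed `datasets.Dataset`):
#
#   SqlDatasetWriter(ds, name="my_table", con="sqlite:///my.db", batch_size=1000).write()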
| 309
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """A pipeline for image super-resolution using latent diffusion."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
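# Usage sketch (illustrative, not from the original file; the checkpoint name is an assumption):
#
#   pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipeline(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]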
| 701
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 61
| 0
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by looking for keywords in the
    first few lines and by counting occurrences of 'config' and 'test' relative to file length."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
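# Illustrative invocation (the flag names are inferred from the `args.*` fields used above;
# values are placeholders):
#
#   python preprocessing.py --dataset_name <raw-dataset> --tokenizer_dir <tokenizer> \
#       --output_dir ./out --near_deduplication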
| 208
|
def trapezoidal_rule(boundary, steps):
    """Approximate the definite integral of f over [boundary[0], boundary[1]] using the
    composite trapezoidal rule: int(f) ~ h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_n-1) + f(b))."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
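# Sanity check (illustrative): the exact integral of x^2 over [0, 1] is 1/3, roughly 0.333,
# so with 10 steps the trapezoidal estimate printed above should land close to 0.335.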
| 208
| 1
|
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
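# Illustrative usage (not in the original file):
#   grid = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
#   Matrix(3, 3, grid).count_islands()  # -> 1, since diagonal neighbours are connected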
| 586
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
"""simple docstring"""
    def _info(self) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),id='references' ),
} ),)
    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowercase_,hypotheses=lowercase_,min_len=lowercase_,max_len=lowercase_ )
}
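

# Illustration only (not part of the metric above): a minimal sentence-level GLEU,
# following the description — clipped n-gram overlap divided by the larger of the
# hypothesis and reference n-gram totals, i.e. min(recall, precision). The helper
# names `_ngram_counts` and `sentence_gleu` are hypothetical, added for clarity.
def _ngram_counts(tokens, min_len=1, max_len=4):
    from collections import Counter

    return Counter(
        tuple(tokens[i : i + n])
        for n in range(min_len, max_len + 1)
        for i in range(len(tokens) - n + 1)
    )


def sentence_gleu(hypothesis, reference, min_len=1, max_len=4):
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    ref = _ngram_counts(reference, min_len, max_len)
    overlap = sum((hyp & ref).values())  # clipped n-gram matches
    # min(overlap/ref_total, overlap/hyp_total) == overlap / max(ref_total, hyp_total)
    return overlap / max(sum(ref.values()), sum(hyp.values()), 1)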
| 586
| 1
|
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name: str) -> str:
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name: str) -> str:
    """Convert a snake-case string to camel-case."""
    parts = _single_underscore_re.split(name)
    parts = [_multiple_underscores_re.split(n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")
def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name: str, split: str, data_dir: str, filetype_suffix: str = None) -> str:
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path: str, dataset_name: str, split: str, filetype_suffix: str = None, shard_lengths: list = None) -> list:
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
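

# Example behaviour of the helpers above (values illustrative):
#   camelcase_to_snakecase("SquadV2")   -> "squad_v2"
#   snakecase_to_camelcase("squad_v2")  -> "SquadV2"
#   filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[100, 100])
#       -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]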
| 598
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Calculate the built-in voltage (in volts) of a pn junction."""
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
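

# Worked example (illustrative silicon-like values, all concentrations in cm^-3):
# builtin_voltage(1e17, 1e17, 1.5e10) ~= kT/q * ln(Nd * Na / ni^2)
#                                     ~= 0.0259 V * ln(4.4e13) ~= 0.81 V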
if __name__ == "__main__":
import doctest
doctest.testmod()
| 598
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 200
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Processor pairing an EnCodec feature extractor with a T5 tokenizer."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
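

# Shape sketch for _decode_audio (values assumed for illustration): with
# audio_values of shape (2, 1, 6), padding_mask [[1, 1, 1, 1], [1, 1, 0, 0]]
# and feature_extractor.padding_value == 0, the mask is right-padded with the
# non-padding token (1) to length 6, so the freshly generated tail is kept and
# the result is two arrays of shape (1, 6) and (1, 4).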
| 200
| 1
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 91
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z)
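
# With a uniformly random pivot, expected comparisons grow as roughly 2 * n * ln(n)
# (about 921 for n = 100), so z typically lands in the high hundreds; the exact
# count varies from run to run.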
| 96
| 0
|
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 are both prime, otherwise -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
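

# Examples (assuming maths.prime_check.is_prime behaves as its name suggests):
# twin_prime(3) returns 5, since 3 and 5 are twin primes; twin_prime(4) returns -1.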
if __name__ == "__main__":
import doctest
doctest.testmod()
| 418
|
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation preceded by a linear projection, with optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated linear unit variant where the gate passes through a GELU."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""Approximate GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero)."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
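

# Note on the Ada* layers above: they are FiLM-style conditioning. A single Linear
# maps the conditioning embedding e to per-channel (scale, shift) — or six chunks
# for adaLN-Zero — and the normalized activations are modulated as
#   x <- Norm(x) * (1 + scale) + shift
# which reduces to a plain LayerNorm/GroupNorm when scale = shift = 0.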
| 418
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
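    # Note: at import time the module object in sys.modules is replaced by a _LazyModule,
    # which resolves the names declared in `_import_structure` on first attribute access.
    # The TYPE_CHECKING branch above performs the real imports only for static analyzers,
    # so heavy backends (torch/tf/flax) are never loaded until actually used.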
| 193
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle the list by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 193
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
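

# Minimal usage sketch (the checkpoint id is illustrative):
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
# `batch` then holds input_ids/attention_mask from the tokenizer plus pixel_values.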
| 64
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 64
| 1
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import OfflineModeIsEnabled, http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(OfflineModeIsEnabled):
            http_head('https://huggingface.co')
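

# The three simulations above cover, in order: a connection that would hang forever
# (so a plain request raises RequestWouldHangIndefinitelyError, while a finite timeout
# raises ConnectTimeout), an immediate connection failure, and HF_DATASETS_OFFLINE=1,
# which short-circuits inside datasets before any network call is attempted.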
| 284
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
"""simple docstring"""
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = after_output[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : str = to_atuple(vision_model.config.image_size )
SCREAMING_SNAKE_CASE_ : Any = to_atuple(vision_model.config.patch_size )
SCREAMING_SNAKE_CASE_ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE_ : str = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE_ : Tuple = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
SCREAMING_SNAKE_CASE_ : str = inputs_dict
SCREAMING_SNAKE_CASE_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = pt_model(**lowerCAmelCase__ ).to_tuple()
SCREAMING_SNAKE_CASE_ : Dict = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4E-2 )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VisionTextDualEncoderModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VisionTextDualEncoderModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config_inputs_dict.pop('vision_config' )
SCREAMING_SNAKE_CASE_ : Optional[int] = config_inputs_dict.pop('text_config' )
SCREAMING_SNAKE_CASE_ : Any = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE_ : int = model_a(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = model_a(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = after_outputs[0]
SCREAMING_SNAKE_CASE_ : str = np.amax(np.abs(outputs[0] - after_outputs[0] ) )  # max absolute difference between pre-save and post-reload outputs
self.assertLessEqual(lowerCAmelCase__ , 1E-5 )
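# Editorial note on the tolerances (an illustrative reading, not a documented guarantee):
# the 1E-5 bound above checks a pure save/reload round trip of the same Flax weights,
# while the looser 4E-2 bound in the PT/Flax equivalence checks earlier leaves headroom
# for cross-framework numerics.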
@require_flax
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = 1_3
SCREAMING_SNAKE_CASE_ : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = FlaxViTModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = FlaxViTModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ : str = vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : List[Any] = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : List[Any] = 1_3
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = FlaxCLIPVisionModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : int = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
SCREAMING_SNAKE_CASE_ : str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
SCREAMING_SNAKE_CASE_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE_ : str = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE_ : Tuple = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
| 101
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( _lowercase):
def _UpperCamelCase ( self : int ) -> List[str]:
self.tmpdirname = tempfile.mkdtemp()
self.num_block_records = 5
# Realm tok
_UpperCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
realm_tokenizer_path = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(realm_tokenizer_path , exist_ok=True )
self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
block_records_path = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(block_records_path , exist_ok=True )
def _UpperCamelCase ( self : int ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def _UpperCamelCase ( self : Optional[int] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : List[Any] ) -> int:
_UpperCamelCase = RealmConfig(num_block_records=self.num_block_records )
return config
def _UpperCamelCase ( self : int ) -> List[Any]:
_UpperCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase = np.array(
[
B'''This is the first record''',
B'''This is the second record''',
B'''This is the third record''',
B'''This is the fourth record''',
B'''This is the fifth record''',
B'''This is a longer longer longer record''',
] , dtype=__UpperCamelCase , )
return block_records
def _UpperCamelCase ( self : Union[str, Any] ) -> int:
_UpperCamelCase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
config = self.get_config()
retriever = self.get_dummy_retriever()
tokenizer = retriever.tokenizer
retrieved_block_ids = np.array([0, 3] , dtype='''long''' )
question_input_ids = tokenizer(['''Test question'''] ).input_ids
answer_ids = tokenizer(
['''the fourth'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
max_length = config.reader_seq_len
has_answers , start_pos , end_pos , concat_inputs = retriever(
retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
self.assertEqual(len(has_answers ) , 2 )
self.assertEqual(len(start_pos ) , 2 )
self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def _UpperCamelCase ( self : Dict ) -> Tuple:
config = self.get_config()
retriever = self.get_dummy_retriever()
tokenizer = retriever.tokenizer
retrieved_block_ids = np.array([0, 3, 5] , dtype='''long''' )
question_input_ids = tokenizer(['''Test question'''] ).input_ids
answer_ids = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
max_length = config.reader_seq_len
has_answers , start_pos , end_pos , concat_inputs = retriever(
retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
self.assertEqual([False, True, True] , has_answers )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
def _UpperCamelCase ( self : Optional[int] ) -> List[str]:
_UpperCamelCase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_UpperCamelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
mock_hf_hub_download.return_value = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
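# Retriever call signature exercised by the tests above (as observed in these tests,
# not a documented API guarantee): retriever(retrieved_block_ids, question_input_ids,
# answer_ids=..., max_length=..., return_tensors="np") returns the tuple
# (has_answers, start_pos, end_pos, concat_inputs).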
| 342
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
UpperCAmelCase = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
UpperCAmelCase = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
UpperCAmelCase = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : str=None , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Dict=False ) -> str:
if rouge_types is None:
_UpperCamelCase = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
_UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=__UpperCamelCase , use_stemmer=__UpperCamelCase )
if use_aggregator:
_UpperCamelCase = scoring.BootstrapAggregator()
else:
_UpperCamelCase = []
for ref, pred in zip(__UpperCamelCase , __UpperCamelCase ):
_UpperCamelCase = scorer.score(__UpperCamelCase , __UpperCamelCase )
if use_aggregator:
aggregator.add_scores(__UpperCamelCase )
else:
scores.append(__UpperCamelCase )
if use_aggregator:
_UpperCamelCase = aggregator.aggregate()
else:
_UpperCamelCase = {}
for key in scores[0]:
_UpperCamelCase = [score[key] for score in scores]
return result
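# Minimal usage sketch (assumes the `datasets` and `rouge_score` packages are
# installed; variable names are illustrative). With use_aggregator=False the metric
# returns one `Score` per example instead of a bootstrap `AggregateScore`:
#
# rouge = datasets.load_metric("rouge")
# per_example = rouge.compute(
#     predictions=["hello there"], references=["hello there"], use_aggregator=False
# )
# per_example["rouge1"][0].fmeasure  # -> 1.0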
| 342
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self ) -> int:
mod_file = inspect.getfile(accelerate.test_utils )
self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
self.data_loop_file_path = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __magic_name__ ( self ) -> Tuple:
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a : List[str] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__ ( self ) -> List[Any]:
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a : Optional[int] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__ ( self ) -> str:
__a : Optional[int] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def __magic_name__ ( self ) -> List[Any]:
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
__a : Any = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
accelerator = Accelerator()
shape = (accelerator.state.process_index + 2, 1_0)
tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
error_msg = ""
tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
tensorb = accelerator.pad_across_processes(tensor, pad_first=True)
if tensorb.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensorb.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensorb[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensorb[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
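# Worked example of the semantics checked above (illustrative, 2 processes):
# process 0 holds a tensor of shape (2, 10) and process 1 holds (3, 10);
# pad_across_processes brings both to (3, 10), appending zero rows at the end by
# default and prepending them when pad_first=True.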
| 597
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE_ = False
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self ) -> List[Any]:
__a : Any = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__a : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__a : Optional[Any] = torch.manual_seed(0 )
__a : Any = pipe(
image=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__a : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a : Tuple = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 597
| 1
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Any = ['''audio_values''', '''audio_mask''']
def __init__( self , A__=2048 , A__=1 , A__=[16, 16] , A__=128 , A__=4_4100 , A__=86 , A__=2048 , A__=0.0 , **A__ , ):
super().__init__(
feature_size=A__ , sampling_rate=A__ , padding_value=A__ , **A__ , )
self.spectrogram_length = spectrogram_length
self.num_channels = num_channels
self.patch_size = patch_size
self.freq_len = feature_size // self.patch_size[1]
self.n_fft = n_fft
self.hop_length = sampling_rate // hop_length_to_sampling_rate
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.mel_filters = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __A ( self , A__ ):
A__ : List[Any] = spectrogram(
A__ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=8_0.0 , )
A__ : List[Any] = log_spec[:, :-1]
A__ : Dict = log_spec - 2_0.0
A__ : List[str] = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
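# Normalization walk-through (editorial note): with log_mel="dB" and db_range=80.0
# the spectrogram lies in [peak - 80, peak] dB; subtracting 20.0, dividing by 40.0,
# clipping to [-2.0, 0.0] and adding 1.0 squashes the values into [-1.0, 1.0].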
def __call__( self , A__ , A__ = None , A__ = True , A__ = None , A__ = False , A__ = False , **A__ , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A__ : Optional[int] = isinstance(A__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
A__ : Optional[int] = is_batched_numpy or (
isinstance(A__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A__ , np.ndarray ):
A__ : List[Any] = np.asarray(A__ , dtype=np.floataa )
elif isinstance(A__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ : int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ : Dict = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
A__ : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A__ ):
A__ : Dict = [np.asarray(A__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
A__ : Optional[int] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
A__ : str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
A__ : Tuple = np.array(A__ ).astype(np.floataa )
# convert into correct format for padding
A__ : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
A__ : str = np.ones([len(A__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
A__ : Tuple = padded_audio_features * self.padding_value
for i in range(len(A__ ) ):
feature = audio_features[i]
padded_audio_features[i, 0, : feature.shape[0], :] = feature
# return as BatchFeature
if return_attention_mask:
A__ : Union[str, Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
A__ : Optional[int] = {"""audio_values""": padded_audio_features}
A__ : List[Any] = BatchFeature(data=A__ , tensor_type=A__ )
return encoded_inputs
| 64
|
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
size = len(matrix )
augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
A__ : int
A__ : int
A__ : int
A__ : int
A__ : int
A__ : float
for row in range(size ):
for col in range(size ):
augmented[row][col] = matrix[row][col]
augmented[row][size] = vector[row][0]
row = 0
col = 0
while row < size and col < size:
# pivoting
pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]  # swap the current row with the pivot row
for rowa in range(row + 1 , size ):
ratio = augmented[rowa][col] / augmented[row][col]
augmented[rowa][col] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , size ):
for row in range(col ):
ratio = augmented[row][col] / augmented[col][col]
for cola in range(col , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
]
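# Quick sanity check of the solver (illustrative): the system x + y = 3, x - y = 1
# has the unique solution x = 2, y = 1, i.e.
# assert solve([[1, 1], [1, -1]], [[3], [1]]) == [[2.0], [1.0]]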
def interpolate(y_list: list[int] ) -> Callable[[int], int]:
size = len(y_list )
matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
vector: Matrix = [[0] for _ in range(size )]
A__ : Matrix
A__ : int
A__ : int
A__ : int
for x_val, y_val in enumerate(y_list ):
for col in range(size ):
matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
vector[x_val][0] = y_val
coeffs: Matrix = solve(matrix , vector )
def interpolated_func(var: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase_ ) )
return interpolated_func
def question_function(variable: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
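# Editorial note: the alternating sum above is a geometric series; it equals
# (n**11 + 1) / (n + 1), the generating function u(n) of Project Euler problem 101.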
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
polynomials: list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
ret: int = 0
A__ : Callable[[int], int]
A__ : int
for poly in polynomials:
x_val = 1
while func(x_val ) == poly(x_val ):
x_val += 1
ret += poly(x_val )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
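# For the default `question_function` and order 10, the sum of the first incorrect
# terms is commonly cited as 37076114526 (Project Euler problem 101) -- stated here
# as a hedged reference value, not verified by this file.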
| 64
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Union[str, Any] = logging.get_logger(__name__)
A: Optional[int] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : str = 'nllb-moe'
__lowerCAmelCase : List[Any] = ['past_key_values']
__lowerCAmelCase : Dict = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _SCREAMING_SNAKE_CASE=128112 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="float32" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE="all" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=0.2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : str = d_model
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[Any] = decoder_layers
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Any = dropout
UpperCAmelCase : Optional[int] = attention_dropout
UpperCAmelCase : Union[str, Any] = activation_dropout
UpperCAmelCase : Dict = activation_function
UpperCAmelCase : int = init_std
UpperCAmelCase : List[Any] = encoder_layerdrop
UpperCAmelCase : Optional[Any] = decoder_layerdrop
UpperCAmelCase : str = use_cache
UpperCAmelCase : List[Any] = encoder_layers
UpperCAmelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Optional[Any] = router_z_loss_coef
UpperCAmelCase : List[str] = router_aux_loss_coef
UpperCAmelCase : str = decoder_sparse_step
UpperCAmelCase : str = encoder_sparse_step
UpperCAmelCase : Optional[int] = num_experts
UpperCAmelCase : Optional[int] = expert_capacity
UpperCAmelCase : List[Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
UpperCAmelCase : int = router_dtype
UpperCAmelCase : Optional[int] = router_ignore_padding_tokens
UpperCAmelCase : Tuple = batch_prioritized_routing
UpperCAmelCase : Any = second_expert_policy
UpperCAmelCase : List[str] = normalize_router_prob_before_dropping
UpperCAmelCase : str = moe_eval_capacity_token_fraction
UpperCAmelCase : Union[str, Any] = moe_token_dropout
UpperCAmelCase : Any = output_router_logits
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
| 160
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A: int = logging.get_logger(__name__)
A: str = {"vocab_file": "sentencepiece.bpe.model"}
A: int = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
A: Optional[Any] = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
A: Union[str, Any] = "▁"  # SentencePiece underline character (U+2581)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Tuple = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None:
'''simple docstring'''
UpperCAmelCase : str = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) - 1
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_b is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_b + sep
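# Layout produced above (BARThez follows the BART convention): a single sequence
# becomes `<s> A </s>`, a pair becomes `<s> A </s></s> B </s>`.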
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=token_ids_a , token_ids_b=token_ids_b , already_has_special_tokens=already_has_special_tokens )
if token_ids_b is None:
return [1] + ([0] * len(token_ids_a )) + [1]
return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase : List[str] = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : str = []
UpperCAmelCase : str = """"""
UpperCAmelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
UpperCAmelCase : int = True
UpperCAmelCase : List[Any] = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = False
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self ) -> Any:
'''simple docstring'''
state = self.__dict__.copy()
state["""sp_model"""] = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
self.__dict__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : Union[str, Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi:
UpperCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 160
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( _UpperCamelCase, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = DiTPipeline
__UpperCamelCase : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase : str = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
__UpperCamelCase : int = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase : Optional[Any] = False
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__SCREAMING_SNAKE_CASE , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = AutoencoderKL()
UpperCamelCase : Any = DDIMScheduler()
UpperCamelCase : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
UpperCamelCase : Union[str, Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = "cpu"
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : str = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = pipe(**__SCREAMING_SNAKE_CASE ).images
UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCamelCase : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
UpperCamelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def _lowercase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowercase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = torch.manual_seed(0 )
UpperCamelCase : Dict = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
UpperCamelCase : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
UpperCamelCase : Optional[Any] = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
UpperCamelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
UpperCamelCase : Dict = ["vase", "umbrella"]
UpperCamelCase : Any = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = torch.manual_seed(0 )
UpperCamelCase : str = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 715
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Union[str, Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
self.lang_code_to_id = {
lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
self._src_lang = src_lang if src_lang is not None else '''en_XX'''
self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang )
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_b is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
self.src_lang = src_lang
UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
inputs['''forced_bos_token_id'''] = tgt_lang_id  # generation must start with the target language code
return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
self.cur_lang_code = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
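# Resulting source-side template (illustrative): `X </s> [src_lang_code]` -- mBART
# appends the language code after EOS and uses no prefix tokens.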
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
self.cur_lang_code = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 643
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_gpt_sw3"""] = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92
|
def UpperCamelCase( __UpperCamelCase : str ,__UpperCamelCase : str ):
lowerCAmelCase_ : Any = len(__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = []
for i in range(len(__UpperCamelCase ) - pat_len + 1 ):
lowerCAmelCase_ : str = True
for j in range(__UpperCamelCase ):
if s[i + j] != pattern[j]:
lowerCAmelCase_ : List[Any] = False
break
if match_found:
position.append(__UpperCamelCase )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
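# Complexity note: the scan above is O(len(s) * len(pattern)) in the worst case.
# Extra sanity checks (illustrative):
assert naive_pattern_search('''AAAAA''', '''AA''') == [0, 1, 2, 3]
assert naive_pattern_search('''ABC''', '''D''') == []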
| 171
| 0
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class UpperCamelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_snake_case = 'mvp'
_snake_case = ['past_key_values']
_snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE=5_02_67 , SCREAMING_SNAKE_CASE=10_24 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=40_96 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=40_96 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=10_24 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0_2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=1_00 , SCREAMING_SNAKE_CASE=8_00 , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : str = d_model
__lowerCAmelCase : Any = encoder_ffn_dim
__lowerCAmelCase : Dict = encoder_layers
__lowerCAmelCase : int = encoder_attention_heads
__lowerCAmelCase : Any = decoder_ffn_dim
__lowerCAmelCase : List[Any] = decoder_layers
__lowerCAmelCase : Union[str, Any] = decoder_attention_heads
__lowerCAmelCase : List[Any] = dropout
__lowerCAmelCase : List[str] = attention_dropout
__lowerCAmelCase : int = activation_dropout
__lowerCAmelCase : List[str] = activation_function
__lowerCAmelCase : int = init_std
__lowerCAmelCase : str = encoder_layerdrop
__lowerCAmelCase : str = decoder_layerdrop
__lowerCAmelCase : List[str] = classifier_dropout
__lowerCAmelCase : Any = use_cache
__lowerCAmelCase : List[Any] = encoder_layers
__lowerCAmelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase : int = use_prompt
__lowerCAmelCase : List[str] = prompt_length
__lowerCAmelCase : List[str] = prompt_mid_dim
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _lowercase ):
__lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'The config can simply be saved and uploaded again to be fixed.' )
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 123
| 0
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
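# Note (added): atol=1e-4 in the slice comparison above tolerates small numeric drift
# across torch/cuDNN versions while still catching real regressions in the hidden states.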
| 490
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
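# Usage sketch (added for illustration; `sampling_rate` must match the extractor's own):
#
#   extractor = Speech2TextFeatureExtractor()
#   speech = np.random.randn(16000).astype(np.float32)  # ~1 second of audio at 16 kHz
#   inputs = extractor(speech, sampling_rate=16000, return_tensors="pt")
#   # inputs["input_features"] holds the 80-bin fbank frames after CMVN normalization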
| 490
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    # heap helper: index of the parent of the current node
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: index of the left child of the current node
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: index of the right child of the current node
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Min-heap keyed by weight, with a position map for O(1) lookups."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with the lowest weight.
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given element and restore the heap invariant.
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement).
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement).
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap two nodes in the heap and keep the position map consistent.
        elem1 = self.heap[node1_pos][0]
        elem2 = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[elem1] = node2_pos
        self.position_map[elem2] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
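# Usage sketch (added for illustration):
#
#   g: GraphUndirectedWeighted[int] = GraphUndirectedWeighted()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 4)
#   dist, parent = prims_algo(g)
#   # `parent` maps each node to its predecessor in the minimum spanning tree;
#   # here the MST keeps edges (1, 2) and (2, 3) and drops the weight-4 edge.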
| 534
|
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """Wraps an existing text-encoder config and adds the multimodal fields."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Reuse every attribute of the wrapped config, then add the modal fields.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
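# Usage sketch (an illustration; `BertConfig` stands in for any text-encoder config):
#
#   from transformers import BertConfig
#   config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)
#   # config now exposes every BertConfig attribute plus `modal_hidden_size`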
| 534
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint to a PyTorch model and save it."""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
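# Example invocation (added; the paths are placeholders for illustration only):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/bert_config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin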
| 88
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """Count the triangular words in words.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 474
| 0
|
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using the math module."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
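# Illustrative checks (added; both predicates agree on perfect squares):
#   perfect_square(25) -> True                 # float-based check
#   perfect_square_binary_search(25) -> True   # integer-only, avoids float rounding
#   perfect_square_binary_search(26) -> False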
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
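# Worked example (added for illustration): 76 is automorphic because 76**2 == 5776;
# the loop compares digits right to left: 6 == 6, then 7 == 7, so it returns True.
# 7 is not automorphic: 7**2 == 49 and 9 != 7, so it returns False immediately.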
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174
| 1
|
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units via their powers of ten."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
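# Worked example (added for illustration): length_conversion(4, "kilometer", "meter")
# looks up exponents 3 (km) and 0 (m), so the result is 4 * 10**(3 - 0) == 4000.0.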
if __name__ == "__main__":
from doctest import testmod
testmod()
| 168
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 168
| 1
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            # serialize any GenerationConfig values to plain dicts
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
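# Usage sketch (an illustration, not part of the original file):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",            # required by TrainingArguments
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   serializable = args.to_dict()    # GenerationConfig values become plain dicts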
| 435
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2*square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]
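# Worked check (added for illustration): the first odd composite that cannot be
# written as prime + 2*square is 5777, so compute_nums(1) == [5777] and solution() == 5777.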
if __name__ == "__main__":
print(f"""{solution() = }""")
| 435
| 1
|
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
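# Worked example (added for illustration): convert_speed(100, "km/h", "m/s")
# = round(100 * 1.0 * 0.277_777_778, 3) = 27.778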
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Move the node at `start` down until the min-heap property holds again.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Restore the heap property upwards after `val` was decreased at `index`.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 415
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 718
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
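# Example invocation (added; flag names assumed from TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128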
| 549
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width for an input image, mirroring what
        DetrImageProcessor does when do_resize is True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowercase , '''image_std''' ) )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''do_rescale''' ) )
self.assertTrue(hasattr(_lowercase , '''rescale_factor''' ) )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
self.assertTrue(hasattr(_lowercase , '''do_pad''' ) )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _lowercase )
_UpperCamelCase: Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowercase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _lowercase )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
_UpperCamelCase: Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
_UpperCamelCase: str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase: List[str] = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase , _UpperCamelCase: List[Any] = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
_UpperCamelCase: Optional[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
_UpperCamelCase: Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase: Any = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase: Dict = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase: List[str] = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : str ):
"""simple docstring"""
_UpperCamelCase: str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
_UpperCamelCase: int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase: Optional[int] = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase: int = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase: str = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase ( self : str ):
"""simple docstring"""
_UpperCamelCase: Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase: Optional[int] = json.loads(f.read() )
_UpperCamelCase: List[str] = {'''image_id''': 39_769, '''annotations''': target}
# encode them
_UpperCamelCase: str = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
_UpperCamelCase: Optional[int] = image_processing(images=_lowercase , annotations=_lowercase , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase: Optional[Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase )
_UpperCamelCase: str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
_UpperCamelCase: str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase ) )
# verify boxes
_UpperCamelCase: Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase )
_UpperCamelCase: Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3 ) )
# verify image_id
_UpperCamelCase: Union[str, Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase ) )
# verify is_crowd
_UpperCamelCase: int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase ) )
# verify class_labels
_UpperCamelCase: str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase ) )
# verify orig_size
_UpperCamelCase: Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase ) )
# verify size
_UpperCamelCase: List[str] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase ) )
@slow
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase: Dict = json.loads(f.read() )
_UpperCamelCase: Any = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
_UpperCamelCase: Optional[int] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCamelCase: List[Any] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
_UpperCamelCase: int = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase: int = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase )
_UpperCamelCase: Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
_UpperCamelCase: Any = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase ) )
# verify boxes
_UpperCamelCase: int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase )
_UpperCamelCase: Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3 ) )
# verify image_id
_UpperCamelCase: Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase ) )
# verify is_crowd
_UpperCamelCase: Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase ) )
# verify class_labels
_UpperCamelCase: Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase ) )
# verify masks
_UpperCamelCase: Any = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowercase )
# verify orig_size
_UpperCamelCase: Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase ) )
# verify size
_UpperCamelCase: Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase ) )
| 271
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 271
| 1
|
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc subtending ``angle`` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
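A quick sanity check of the formula (not in the original snippet; assumes `arc_length` from above is in scope): 90° on a circle of radius 10 is a quarter of the circumference, 2·π·10/4 = 5π ≈ 15.708.

from math import isclose, pi

assert isclose(arc_length(90, 10), 5 * pi)  # quarter of the circumference of a radius-10 circle
assert isclose(arc_length(360, 1), 2 * pi)  # full circumference of the unit circle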
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(ids)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_type(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_1 = tokenizer.encode(prefix_text + input_text)
        ids_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        ids_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(ids_1)
        output_text_2 = tokenizer.decode(ids_2)
        output_text_3 = tokenizer.decode(ids_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        token = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(token, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(token, padding=True)

        # fmt: off
        input_ids = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, input_ids)
        self.assertListEqual(x_token.token_type_ids, token_type_ids)
        self.assertListEqual(x_token.attention_mask, attention_mask)
        self.assertListEqual(x_token_2.input_ids, input_ids)
        self.assertListEqual(x_token_2.token_type_ids, token_type_ids)
        self.assertListEqual(x_token_2.attention_mask, attention_mask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
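A minimal usage sketch of the prefix mechanism the slow tests above exercise (a sketch only; it downloads Tanrei/GPTSAN-japanese and the printed values are not asserted here):

from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
enc = tokenizer("こんばんは、世界。", prefix_text="こんにちは、世界。")
# token_type_ids distinguish the prefix segment (1) from the text segment (0),
# mirroring the masks asserted in test_token_type_ids above.
print(enc.input_ids)
print(enc.token_type_ids)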